/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name.  */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "rs6000-internal.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif
/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;
/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;

extern GTY(()) section *toc_section;
section *toc_section = 0;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name.  */
  unsigned int mask;		/* mask bits to set.  */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask.  */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },				  /* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },	  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },	  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9).  */
};
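
/* A sketch of the user-facing side of this table (the function body is
   only a placeholder):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long f (long a, long b) { return a % b; }

   Each requested clone is compiled separately, and an ifunc resolver
   dispatches at run time using the HWCAP names listed above.  */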
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when determining
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and Altivec registers here, along with an ANY field that is the OR of the
   three register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
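
/* For example, a mode that is valid in a register class and supports both
   reg+reg and reg+offset addressing there would carry the mask
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
   = 0x01 | 0x04 | 0x08 = 0x0d in that class's addr_mask slot.  */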
/* Valid address masks and reload insns for each mode, indexed by register
   type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
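
/* This predicate is referenced from the machine description's scheduling
   bypasses; a representative (hypothetical) use in an .md file would be:

     (define_bypass 2 "producer-insn" "consumer-store"
		    "rs6000_store_data_bypass_p")

   where the insn reservation names are placeholders.  */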
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;
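
/* All of the cost tables below are expressed with COSTS_N_INSNS, so an
   entry of COSTS_N_INSNS (4) marks an operation as roughly four times as
   expensive as a single integer add on that processor.  */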
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
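
/* To make the expansion concrete: a (hypothetical) line in
   rs6000-builtin.def such as

     RS6000_BUILTIN_2 (MY_BUILTIN, "my_builtin", MASK, ATTR, CODE_FOR_nothing)

   contributes the initializer { "my_builtin", CODE_FOR_nothing, MASK, ATTR }
   to rs6000_builtin_info above; the table is indexed by the ENUM values, in
   definition order.  */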
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static tree get_prev_label (tree);
#endif
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static rtx rs6000_internal_arg_pointer (void);

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  /* GPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* FPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* VRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "0", "1", "2", "3", "4", "5", "6", "7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  /* GPRs */
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  /* FPRs */
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  /* VRs */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
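
/* Usage sketches for two of these attributes (the declarations are
   illustrative):

     vector int __attribute__ ((altivec (vector__))) vi;
     void far_call (void) __attribute__ ((longcall));

   "altivec" takes one argument naming the AltiVec type class, while
   "longcall" makes calls to the function go through a pointer so they can
   reach anywhere in the address space.  */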
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
  rs6000_ira_change_pseudo_allocno_class
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_TRANSLATE_MODE_ATTRIBUTE
#define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_FLOATN_MODE
#define TARGET_FLOATN_MODE rs6000_floatn_mode

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override
1730 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1731 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1732 rs6000_builtin_vectorized_function
1734 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1735 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1736 rs6000_builtin_md_vectorized_function
1738 #undef TARGET_STACK_PROTECT_GUARD
1739 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1741 #if !TARGET_MACHO
1742 #undef TARGET_STACK_PROTECT_FAIL
1743 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1744 #endif
1746 #ifdef HAVE_AS_TLS
1747 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1748 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1749 #endif
1751 /* Use a 32-bit anchor range. This leads to sequences like:
1753 addis tmp,anchor,high
1754 add dest,tmp,low
1756 where tmp itself acts as an anchor, and can be shared between
1757 accesses to the same 64k page. */
1758 #undef TARGET_MIN_ANCHOR_OFFSET
1759 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1760 #undef TARGET_MAX_ANCHOR_OFFSET
1761 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1762 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1763 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1764 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1765 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1767 #undef TARGET_BUILTIN_RECIPROCAL
1768 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1770 #undef TARGET_SECONDARY_RELOAD
1771 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1772 #undef TARGET_SECONDARY_MEMORY_NEEDED
1773 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1774 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1775 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1777 #undef TARGET_LEGITIMATE_ADDRESS_P
1778 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1780 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1781 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1783 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1784 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1786 #undef TARGET_CAN_ELIMINATE
1787 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1789 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1790 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1792 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1793 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1795 #undef TARGET_TRAMPOLINE_INIT
1796 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1798 #undef TARGET_FUNCTION_VALUE
1799 #define TARGET_FUNCTION_VALUE rs6000_function_value
1801 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1802 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1804 #undef TARGET_OPTION_SAVE
1805 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1807 #undef TARGET_OPTION_RESTORE
1808 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1810 #undef TARGET_OPTION_PRINT
1811 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1813 #undef TARGET_CAN_INLINE_P
1814 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1816 #undef TARGET_SET_CURRENT_FUNCTION
1817 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1819 #undef TARGET_LEGITIMATE_CONSTANT_P
1820 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1822 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1823 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1825 #undef TARGET_CAN_USE_DOLOOP_P
1826 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1828 #undef TARGET_PREDICT_DOLOOP_P
1829 #define TARGET_PREDICT_DOLOOP_P rs6000_predict_doloop_p
1831 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1832 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1834 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1835 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1836 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1837 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1838 #undef TARGET_UNWIND_WORD_MODE
1839 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1841 #undef TARGET_OFFLOAD_OPTIONS
1842 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1844 #undef TARGET_C_MODE_FOR_SUFFIX
1845 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1847 #undef TARGET_INVALID_BINARY_OP
1848 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1850 #undef TARGET_OPTAB_SUPPORTED_P
1851 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1853 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1854 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1856 #undef TARGET_COMPARE_VERSION_PRIORITY
1857 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1859 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1860 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1861 rs6000_generate_version_dispatcher_body
1863 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1864 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1865 rs6000_get_function_versions_dispatcher
1867 #undef TARGET_OPTION_FUNCTION_VERSIONS
1868 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1870 #undef TARGET_HARD_REGNO_NREGS
1871 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1872 #undef TARGET_HARD_REGNO_MODE_OK
1873 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1875 #undef TARGET_MODES_TIEABLE_P
1876 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1878 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1879 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1880 rs6000_hard_regno_call_part_clobbered
1882 #undef TARGET_SLOW_UNALIGNED_ACCESS
1883 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1885 #undef TARGET_CAN_CHANGE_MODE_CLASS
1886 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1888 #undef TARGET_CONSTANT_ALIGNMENT
1889 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1891 #undef TARGET_STARTING_FRAME_OFFSET
1892 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1894 #if TARGET_ELF && RS6000_WEAK
1895 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1896 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1897 #endif
1899 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1900 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1902 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1903 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1906 /* Processor table. */
1907 struct rs6000_ptt
1909 const char *const name; /* Canonical processor name. */
1910 const enum processor_type processor; /* Processor type enum value. */
1911 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1914 static struct rs6000_ptt const processor_target_table[] =
1916 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1917 #include "rs6000-cpus.def"
1918 #undef RS6000_CPU
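/* For illustration (a sketch; see rs6000-cpus.def for the actual names and
   flag masks): each RS6000_CPU line in the .def file expands to one table
   entry, so a hypothetical entry such as

     RS6000_CPU ("power9", PROCESSOR_POWER9, ISA_3_0_MASKS_SERVER)

   would become

     { "power9", PROCESSOR_POWER9, ISA_3_0_MASKS_SERVER },

   keeping this table in lockstep with the .def file.  */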
1921 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1922 name is invalid. */
1924 static int
1925 rs6000_cpu_name_lookup (const char *name)
1927 size_t i;
1929 if (name != NULL)
1931 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1932 if (! strcmp (name, processor_target_table[i].name))
1933 return (int)i;
1936 return -1;
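/* Usage sketch (illustrative only; the accepted names are exactly those in
   rs6000-cpus.def):

     int idx = rs6000_cpu_name_lookup ("power8");
     if (idx >= 0)
       flags = processor_target_table[idx].target_enable;

   A negative return means the -mcpu=/-mtune= name was not recognized and
   the caller should report an error.  */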
1940 /* Return number of consecutive hard regs needed starting at reg REGNO
1941 to hold something of mode MODE.
1942 This is ordinarily the length in words of a value of mode MODE
1943 but can be less for certain modes in special long registers.
1945 POWER and PowerPC GPRs hold 32 bits worth;
1946 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1948 static int
1949 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1951 unsigned HOST_WIDE_INT reg_size;
1953 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1954 128-bit floating point that can go in vector registers, which has VSX
1955 memory addressing. */
1956 if (FP_REGNO_P (regno))
1957 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1958 ? UNITS_PER_VSX_WORD
1959 : UNITS_PER_FP_WORD);
1961 else if (ALTIVEC_REGNO_P (regno))
1962 reg_size = UNITS_PER_ALTIVEC_WORD;
1964 else
1965 reg_size = UNITS_PER_WORD;
1967 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
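/* Worked example of the ceiling division above: with 32-bit GPRs,
   reg_size == UNITS_PER_WORD == 4, so DFmode (8 bytes) needs
   (8 + 4 - 1) / 4 == 2 consecutive registers; an FPR holding V2DFmode
   under VSX uses reg_size == UNITS_PER_VSX_WORD == 16 and needs
   (16 + 16 - 1) / 16 == 1 register.  */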
1970 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1971 MODE. */
1972 static int
1973 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
1975 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1977 if (COMPLEX_MODE_P (mode))
1978 mode = GET_MODE_INNER (mode);
1980 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1981 register combinations, and use PTImode where we need to deal with quad
1982 word memory operations. Don't allow quad words in the argument or frame
1983 pointer registers, just registers 0..31. */
1984 if (mode == PTImode)
1985 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1986 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1987 && ((regno & 1) == 0));
1989 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1990 implementations. Don't allow an item to be split between a FP register
1991 and an Altivec register. Allow TImode in all VSX registers if the user
1992 asked for it. */
1993 if (TARGET_VSX && VSX_REGNO_P (regno)
1994 && (VECTOR_MEM_VSX_P (mode)
1995 || FLOAT128_VECTOR_P (mode)
1996 || reg_addr[mode].scalar_in_vmx_p
1997 || mode == TImode
1998 || (TARGET_VADDUQM && mode == V1TImode)))
2000 if (FP_REGNO_P (regno))
2001 return FP_REGNO_P (last_regno);
2003 if (ALTIVEC_REGNO_P (regno))
2005 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2006 return 0;
2008 return ALTIVEC_REGNO_P (last_regno);
2012 /* The GPRs can hold any mode, but values bigger than one register
2013 cannot go past R31. */
2014 if (INT_REGNO_P (regno))
2015 return INT_REGNO_P (last_regno);
2017 /* The float registers (except for VSX vector modes) can only hold floating
2018 modes and DImode. */
2019 if (FP_REGNO_P (regno))
2021 if (FLOAT128_VECTOR_P (mode))
2022 return false;
2024 if (SCALAR_FLOAT_MODE_P (mode)
2025 && (mode != TDmode || (regno % 2) == 0)
2026 && FP_REGNO_P (last_regno))
2027 return 1;
2029 if (GET_MODE_CLASS (mode) == MODE_INT)
2031 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2032 return 1;
2034 if (TARGET_P8_VECTOR && (mode == SImode))
2035 return 1;
2037 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2038 return 1;
2041 return 0;
2044 /* The CR register can only hold CC modes. */
2045 if (CR_REGNO_P (regno))
2046 return GET_MODE_CLASS (mode) == MODE_CC;
2048 if (CA_REGNO_P (regno))
2049 return mode == Pmode || mode == SImode;
2051 /* AltiVec modes go only in AltiVec registers. */
2052 if (ALTIVEC_REGNO_P (regno))
2053 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2054 || mode == V1TImode);
2056 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2057 registers, and the value must fit within the register set. */
2059 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
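/* Illustrative cases for the checks above: on a 64-bit target, PTImode
   starting at GPR 4 is accepted (even regno, with last_regno 5 still a
   GPR), while GPR 5 fails the even/odd test; likewise TDmode is
   restricted to even-numbered FPRs by the (regno % 2) == 0 test.  */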
2062 /* Implement TARGET_HARD_REGNO_NREGS. */
2064 static unsigned int
2065 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2067 return rs6000_hard_regno_nregs[mode][regno];
2070 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2072 static bool
2073 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2075 return rs6000_hard_regno_mode_ok_p[mode][regno];
2078 /* Implement TARGET_MODES_TIEABLE_P.
2080 PTImode cannot tie with other modes because PTImode is restricted to even
2081 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2082 57744).
2084 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2085 128-bit floating point on VSX systems ties with other vectors. */
2087 static bool
2088 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2090 if (mode1 == PTImode)
2091 return mode2 == PTImode;
2092 if (mode2 == PTImode)
2093 return false;
2095 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2096 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2097 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2098 return false;
2100 if (SCALAR_FLOAT_MODE_P (mode1))
2101 return SCALAR_FLOAT_MODE_P (mode2);
2102 if (SCALAR_FLOAT_MODE_P (mode2))
2103 return false;
2105 if (GET_MODE_CLASS (mode1) == MODE_CC)
2106 return GET_MODE_CLASS (mode2) == MODE_CC;
2107 if (GET_MODE_CLASS (mode2) == MODE_CC)
2108 return false;
2110 return true;
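/* Examples of the ordering above: SFmode ties with DFmode, since both
   are scalar float; DFmode does not tie with V2DFmode, because the
   vector test comes first and only vector modes tie with vector modes;
   PTImode ties only with itself; and CCmode ties with CCUNSmode because
   both have class MODE_CC.  */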
2113 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2115 static bool
2116 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2117 unsigned int regno, machine_mode mode)
2119 if (TARGET_32BIT
2120 && TARGET_POWERPC64
2121 && GET_MODE_SIZE (mode) > 4
2122 && INT_REGNO_P (regno))
2123 return true;
2125 if (TARGET_VSX
2126 && FP_REGNO_P (regno)
2127 && GET_MODE_SIZE (mode) > 8
2128 && !FLOAT128_2REG_P (mode))
2129 return true;
2131 return false;
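/* Example: with -m32 -mpowerpc64, a DImode value in a GPR is
   part-clobbered, because the 32-bit ABIs preserve only the low 32 bits
   of the register across calls.  Similarly, under VSX only the lower 64
   bits of a traditional FPR are call-saved, so a 16-byte mode such as
   KFmode in an FPR is part-clobbered as well.  */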
2134 /* Print interesting facts about registers. */
2135 static void
2136 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2138 int r, m;
2140 for (r = first_regno; r <= last_regno; ++r)
2142 const char *comma = "";
2143 int len;
2145 if (first_regno == last_regno)
2146 fprintf (stderr, "%s:\t", reg_name);
2147 else
2148 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2150 len = 8;
2151 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2152 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2154 if (len > 70)
2156 fprintf (stderr, ",\n\t");
2157 len = 8;
2158 comma = "";
2161 if (rs6000_hard_regno_nregs[m][r] > 1)
2162 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2163 rs6000_hard_regno_nregs[m][r]);
2164 else
2165 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2167 comma = ", ";
2170 if (call_used_regs[r])
2172 if (len > 70)
2174 fprintf (stderr, ",\n\t");
2175 len = 8;
2176 comma = "";
2179 len += fprintf (stderr, "%s%s", comma, "call-used");
2180 comma = ", ";
2183 if (fixed_regs[r])
2185 if (len > 70)
2187 fprintf (stderr, ",\n\t");
2188 len = 8;
2189 comma = "";
2192 len += fprintf (stderr, "%s%s", comma, "fixed");
2193 comma = ", ";
2196 if (len > 70)
2198 fprintf (stderr, ",\n\t");
2199 comma = "";
2202 len += fprintf (stderr, "%sreg-class = %s", comma,
2203 reg_class_names[(int)rs6000_regno_regclass[r]]);
2204 comma = ", ";
2206 if (len > 70)
2208 fprintf (stderr, ",\n\t");
2209 comma = "";
2212 fprintf (stderr, "%sregno = %d\n", comma, r);
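/* A line printed by this function looks roughly like (illustrative; the
   exact mode list depends on the configuration):

     gr0:    SI, DI/2, SF, DF/2, ..., call-used, reg-class = GENERAL_REGS, regno = 0

   where "/2" marks modes that need two consecutive hard registers.  */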
2216 static const char *
2217 rs6000_debug_vector_unit (enum rs6000_vector v)
2219 const char *ret;
2221 switch (v)
2223 case VECTOR_NONE: ret = "none"; break;
2224 case VECTOR_ALTIVEC: ret = "altivec"; break;
2225 case VECTOR_VSX: ret = "vsx"; break;
2226 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2227 default: ret = "unknown"; break;
2230 return ret;
2233 /* Inner function printing just the address mask for a particular reload
2234 register class. */
2235 DEBUG_FUNCTION char *
2236 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2238 static char ret[8];
2239 char *p = ret;
2241 if ((mask & RELOAD_REG_VALID) != 0)
2242 *p++ = 'v';
2243 else if (keep_spaces)
2244 *p++ = ' ';
2246 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2247 *p++ = 'm';
2248 else if (keep_spaces)
2249 *p++ = ' ';
2251 if ((mask & RELOAD_REG_INDEXED) != 0)
2252 *p++ = 'i';
2253 else if (keep_spaces)
2254 *p++ = ' ';
2256 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2257 *p++ = 'O';
2258 else if ((mask & RELOAD_REG_OFFSET) != 0)
2259 *p++ = 'o';
2260 else if (keep_spaces)
2261 *p++ = ' ';
2263 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2264 *p++ = '+';
2265 else if (keep_spaces)
2266 *p++ = ' ';
2268 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2269 *p++ = '+';
2270 else if (keep_spaces)
2271 *p++ = ' ';
2273 if ((mask & RELOAD_REG_AND_M16) != 0)
2274 *p++ = '&';
2275 else if (keep_spaces)
2276 *p++ = ' ';
2278 *p = '\0';
2280 return ret;
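/* Example: a mask with the VALID, INDEXED and OFFSET bits plus both
   update forms prints as "v io++ " when KEEP_SPACES is true (one column
   per flag, in the order tested above), and as "vio++" otherwise.  */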
2283 /* Print the address masks in a human-readable fashion. */
2284 DEBUG_FUNCTION void
2285 rs6000_debug_print_mode (ssize_t m)
2287 ssize_t rc;
2288 int spaces = 0;
2290 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2291 for (rc = 0; rc < N_RELOAD_REG; rc++)
2292 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2293 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2295 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2296 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2298 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2299 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2300 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2301 spaces = 0;
2303 else
2304 spaces += sizeof (" Reload=sl") - 1;
2306 if (reg_addr[m].scalar_in_vmx_p)
2308 fprintf (stderr, "%*s Upper=y", spaces, "");
2309 spaces = 0;
2311 else
2312 spaces += sizeof (" Upper=y") - 1;
2314 if (rs6000_vector_unit[m] != VECTOR_NONE
2315 || rs6000_vector_mem[m] != VECTOR_NONE)
2317 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2318 spaces, "",
2319 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2320 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2323 fputs ("\n", stderr);
2326 #define DEBUG_FMT_ID "%-32s= "
2327 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2328 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2329 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
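/* These rely on string concatenation; for instance

     fprintf (stderr, DEBUG_FMT_S, "abi", "aix");

   uses the format "%-32s= %s\n", printing the label left-justified in a
   32-column field so that the -mdebug=reg output lines up.  */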
2331 /* Print various interesting information with -mdebug=reg. */
2332 static void
2333 rs6000_debug_reg_global (void)
2335 static const char *const tf[2] = { "false", "true" };
2336 const char *nl = (const char *)0;
2337 int m;
2338 size_t m1, m2, v;
2339 char costly_num[20];
2340 char nop_num[20];
2341 char flags_buffer[40];
2342 const char *costly_str;
2343 const char *nop_str;
2344 const char *trace_str;
2345 const char *abi_str;
2346 const char *cmodel_str;
2347 struct cl_target_option cl_opts;
2349 /* Modes we want tieable information on. */
2350 static const machine_mode print_tieable_modes[] = {
2351 QImode,
2352 HImode,
2353 SImode,
2354 DImode,
2355 TImode,
2356 PTImode,
2357 SFmode,
2358 DFmode,
2359 TFmode,
2360 IFmode,
2361 KFmode,
2362 SDmode,
2363 DDmode,
2364 TDmode,
2365 V16QImode,
2366 V8HImode,
2367 V4SImode,
2368 V2DImode,
2369 V1TImode,
2370 V32QImode,
2371 V16HImode,
2372 V8SImode,
2373 V4DImode,
2374 V2TImode,
2375 V4SFmode,
2376 V2DFmode,
2377 V8SFmode,
2378 V4DFmode,
2379 CCmode,
2380 CCUNSmode,
2381 CCEQmode,
2384 /* Virtual regs we are interested in. */
2385 const static struct {
2386 int regno; /* register number. */
2387 const char *name; /* register name. */
2388 } virtual_regs[] = {
2389 { STACK_POINTER_REGNUM, "stack pointer:" },
2390 { TOC_REGNUM, "toc: " },
2391 { STATIC_CHAIN_REGNUM, "static chain: " },
2392 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2393 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2394 { ARG_POINTER_REGNUM, "arg pointer: " },
2395 { FRAME_POINTER_REGNUM, "frame pointer:" },
2396 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2397 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2398 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2399 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2400 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2401 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2402 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2403 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2404 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2407 fputs ("\nHard register information:\n", stderr);
2408 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2409 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2410 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2411 LAST_ALTIVEC_REGNO,
2412 "vs");
2413 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2414 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2415 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2416 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2417 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2418 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2420 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2421 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2422 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2424 fprintf (stderr,
2425 "\n"
2426 "d reg_class = %s\n"
2427 "f reg_class = %s\n"
2428 "v reg_class = %s\n"
2429 "wa reg_class = %s\n"
2430 "we reg_class = %s\n"
2431 "wr reg_class = %s\n"
2432 "wx reg_class = %s\n"
2433 "wA reg_class = %s\n"
2434 "\n",
2435 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2436 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2437 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2438 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2439 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2440 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2441 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2442 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
2444 nl = "\n";
2445 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2446 rs6000_debug_print_mode (m);
2448 fputs ("\n", stderr);
2450 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2452 machine_mode mode1 = print_tieable_modes[m1];
2453 bool first_time = true;
2455 nl = (const char *)0;
2456 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2458 machine_mode mode2 = print_tieable_modes[m2];
2459 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2461 if (first_time)
2463 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2464 nl = "\n";
2465 first_time = false;
2468 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2472 if (!first_time)
2473 fputs ("\n", stderr);
2476 if (nl)
2477 fputs (nl, stderr);
2479 if (rs6000_recip_control)
2481 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2483 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2484 if (rs6000_recip_bits[m])
2486 fprintf (stderr,
2487 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2488 GET_MODE_NAME (m),
2489 (RS6000_RECIP_AUTO_RE_P (m)
2490 ? "auto"
2491 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2492 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2493 ? "auto"
2494 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2497 fputs ("\n", stderr);
2500 if (rs6000_cpu_index >= 0)
2502 const char *name = processor_target_table[rs6000_cpu_index].name;
2503 HOST_WIDE_INT flags
2504 = processor_target_table[rs6000_cpu_index].target_enable;
2506 sprintf (flags_buffer, "-mcpu=%s flags", name);
2507 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2509 else
2510 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2512 if (rs6000_tune_index >= 0)
2514 const char *name = processor_target_table[rs6000_tune_index].name;
2515 HOST_WIDE_INT flags
2516 = processor_target_table[rs6000_tune_index].target_enable;
2518 sprintf (flags_buffer, "-mtune=%s flags", name);
2519 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2521 else
2522 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2524 cl_target_option_save (&cl_opts, &global_options);
2525 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2526 rs6000_isa_flags);
2528 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2529 rs6000_isa_flags_explicit);
2531 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2532 rs6000_builtin_mask);
2534 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2536 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2537 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2539 switch (rs6000_sched_costly_dep)
2541 case max_dep_latency:
2542 costly_str = "max_dep_latency";
2543 break;
2545 case no_dep_costly:
2546 costly_str = "no_dep_costly";
2547 break;
2549 case all_deps_costly:
2550 costly_str = "all_deps_costly";
2551 break;
2553 case true_store_to_load_dep_costly:
2554 costly_str = "true_store_to_load_dep_costly";
2555 break;
2557 case store_to_load_dep_costly:
2558 costly_str = "store_to_load_dep_costly";
2559 break;
2561 default:
2562 costly_str = costly_num;
2563 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2564 break;
2567 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2569 switch (rs6000_sched_insert_nops)
2571 case sched_finish_regroup_exact:
2572 nop_str = "sched_finish_regroup_exact";
2573 break;
2575 case sched_finish_pad_groups:
2576 nop_str = "sched_finish_pad_groups";
2577 break;
2579 case sched_finish_none:
2580 nop_str = "sched_finish_none";
2581 break;
2583 default:
2584 nop_str = nop_num;
2585 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2586 break;
2589 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2591 switch (rs6000_sdata)
2593 default:
2594 case SDATA_NONE:
2595 break;
2597 case SDATA_DATA:
2598 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2599 break;
2601 case SDATA_SYSV:
2602 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2603 break;
2605 case SDATA_EABI:
2606 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2607 break;
2611 switch (rs6000_traceback)
2613 case traceback_default: trace_str = "default"; break;
2614 case traceback_none: trace_str = "none"; break;
2615 case traceback_part: trace_str = "part"; break;
2616 case traceback_full: trace_str = "full"; break;
2617 default: trace_str = "unknown"; break;
2620 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2622 switch (rs6000_current_cmodel)
2624 case CMODEL_SMALL: cmodel_str = "small"; break;
2625 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2626 case CMODEL_LARGE: cmodel_str = "large"; break;
2627 default: cmodel_str = "unknown"; break;
2630 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2632 switch (rs6000_current_abi)
2634 case ABI_NONE: abi_str = "none"; break;
2635 case ABI_AIX: abi_str = "aix"; break;
2636 case ABI_ELFv2: abi_str = "ELFv2"; break;
2637 case ABI_V4: abi_str = "V4"; break;
2638 case ABI_DARWIN: abi_str = "darwin"; break;
2639 default: abi_str = "unknown"; break;
2642 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2644 if (rs6000_altivec_abi)
2645 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2647 if (rs6000_darwin64_abi)
2648 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2650 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2651 (TARGET_SOFT_FLOAT ? "true" : "false"));
2653 if (TARGET_LINK_STACK)
2654 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2656 if (TARGET_P8_FUSION)
2658 char options[80];
2660 strcpy (options, "power8");
2661 if (TARGET_P8_FUSION_SIGN)
2662 strcat (options, ", sign");
2664 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2667 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2668 TARGET_SECURE_PLT ? "secure" : "bss");
2669 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2670 aix_struct_return ? "aix" : "sysv");
2671 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2672 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2673 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2674 tf[!!rs6000_align_branch_targets]);
2675 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2676 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2677 rs6000_long_double_type_size);
2678 if (rs6000_long_double_type_size > 64)
2680 fprintf (stderr, DEBUG_FMT_S, "long double type",
2681 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2682 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2683 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2685 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2686 (int)rs6000_sched_restricted_insns_priority);
2687 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2688 (int)END_BUILTINS);
2689 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2690 (int)RS6000_BUILTIN_COUNT);
2692 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2693 (int)TARGET_FLOAT128_ENABLE_TYPE);
2695 if (TARGET_VSX)
2696 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2697 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2699 if (TARGET_DIRECT_MOVE_128)
2700 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2701 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2705 /* Update the addr mask bits in reg_addr to help secondary reload and the
2706 legitimate address support figure out the appropriate addressing to
2707 use. */
2709 static void
2710 rs6000_setup_reg_addr_masks (void)
2712 ssize_t rc, reg, m, nregs;
2713 addr_mask_type any_addr_mask, addr_mask;
2715 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2717 machine_mode m2 = (machine_mode) m;
2718 bool complex_p = false;
2719 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2720 size_t msize;
2722 if (COMPLEX_MODE_P (m2))
2724 complex_p = true;
2725 m2 = GET_MODE_INNER (m2);
2728 msize = GET_MODE_SIZE (m2);
2730 /* SDmode is special in that we want to access it only via REG+REG
2731 addressing on power7 and above, since we want to use the LFIWZX and
2732 STFIWZX instructions to load it. */
2733 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2735 any_addr_mask = 0;
2736 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2738 addr_mask = 0;
2739 reg = reload_reg_map[rc].reg;
2741 /* Can mode values go in the GPR/FPR/Altivec registers? */
2742 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2744 bool small_int_vsx_p = (small_int_p
2745 && (rc == RELOAD_REG_FPR
2746 || rc == RELOAD_REG_VMX));
2748 nregs = rs6000_hard_regno_nregs[m][reg];
2749 addr_mask |= RELOAD_REG_VALID;
2751 /* Indicate if the mode takes more than 1 physical register. If
2752 it takes a single register, indicate it can do REG+REG
2753 addressing. Small integers in VSX registers can only do
2754 REG+REG addressing. */
2755 if (small_int_vsx_p)
2756 addr_mask |= RELOAD_REG_INDEXED;
2757 else if (nregs > 1 || m == BLKmode || complex_p)
2758 addr_mask |= RELOAD_REG_MULTIPLE;
2759 else
2760 addr_mask |= RELOAD_REG_INDEXED;
2762 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2763 addressing. If we allow scalars into Altivec registers,
2764 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2766 For VSX systems, we don't allow update addressing for
2767 DFmode/SFmode if those registers can go in both the
2768 traditional floating point registers and Altivec registers.
2769 The load/store instructions for the Altivec registers do not
2770 have update forms. If we allowed update addressing, it seems
2771 to break IV-OPT code using floating point if the index type is
2772 int instead of long (PR target/81550 and target/84042). */
2774 if (TARGET_UPDATE
2775 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2776 && msize <= 8
2777 && !VECTOR_MODE_P (m2)
2778 && !FLOAT128_VECTOR_P (m2)
2779 && !complex_p
2780 && (m != E_DFmode || !TARGET_VSX)
2781 && (m != E_SFmode || !TARGET_P8_VECTOR)
2782 && !small_int_vsx_p)
2784 addr_mask |= RELOAD_REG_PRE_INCDEC;
2786 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2787 we don't allow PRE_MODIFY for some multi-register
2788 operations. */
2789 switch (m)
2791 default:
2792 addr_mask |= RELOAD_REG_PRE_MODIFY;
2793 break;
2795 case E_DImode:
2796 if (TARGET_POWERPC64)
2797 addr_mask |= RELOAD_REG_PRE_MODIFY;
2798 break;
2800 case E_DFmode:
2801 case E_DDmode:
2802 if (TARGET_HARD_FLOAT)
2803 addr_mask |= RELOAD_REG_PRE_MODIFY;
2804 break;
2809 /* GPR and FPR registers can do REG+OFFSET addressing, except
2810 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2811 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2812 if ((addr_mask != 0) && !indexed_only_p
2813 && msize <= 8
2814 && (rc == RELOAD_REG_GPR
2815 || ((msize == 8 || m2 == SFmode)
2816 && (rc == RELOAD_REG_FPR
2817 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2818 addr_mask |= RELOAD_REG_OFFSET;
2820 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2821 instructions are enabled. The offset for 128-bit VSX registers is
2822 only 12 bits. While GPRs can handle the full offset range, VSX
2823 registers can only handle the restricted range. */
2824 else if ((addr_mask != 0) && !indexed_only_p
2825 && msize == 16 && TARGET_P9_VECTOR
2826 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2827 || (m2 == TImode && TARGET_VSX)))
2829 addr_mask |= RELOAD_REG_OFFSET;
2830 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2831 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2834 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2835 addressing on 128-bit types. */
2836 if (rc == RELOAD_REG_VMX && msize == 16
2837 && (addr_mask & RELOAD_REG_VALID) != 0)
2838 addr_mask |= RELOAD_REG_AND_M16;
2840 reg_addr[m].addr_mask[rc] = addr_mask;
2841 any_addr_mask |= addr_mask;
2844 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
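/* Example of the resulting masks (a sketch, assuming a 64-bit
   power8-style target with TARGET_VSX and TARGET_P8_VECTOR): DFmode and
   SFmode lose the PRE_INCDEC/PRE_MODIFY bits entirely, per the IV-OPT
   workaround described above, while DImode in a GPR keeps them; and
   SDmode with TARGET_NO_SDMODE_STACK is indexed-only, so it never gets
   RELOAD_REG_OFFSET.  */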
2849 /* Initialize the various global tables that are based on register size. */
2850 static void
2851 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2853 ssize_t r, m, c;
2854 int align64;
2855 int align32;
2857 /* Precalculate REGNO_REG_CLASS. */
2858 rs6000_regno_regclass[0] = GENERAL_REGS;
2859 for (r = 1; r < 32; ++r)
2860 rs6000_regno_regclass[r] = BASE_REGS;
2862 for (r = 32; r < 64; ++r)
2863 rs6000_regno_regclass[r] = FLOAT_REGS;
2865 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2866 rs6000_regno_regclass[r] = NO_REGS;
2868 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2869 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2871 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2872 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2873 rs6000_regno_regclass[r] = CR_REGS;
2875 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2876 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2877 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2878 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2879 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2880 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2881 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2883 /* Precalculate register class to simpler reload register class. We don't
2884 need all of the register classes that are combinations of different
2885 classes, just the simple ones that have constraint letters. */
2886 for (c = 0; c < N_REG_CLASSES; c++)
2887 reg_class_to_reg_type[c] = NO_REG_TYPE;
2889 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2890 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2891 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2892 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2893 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2894 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2895 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2897 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2898 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2900 if (TARGET_VSX)
2902 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2903 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2905 else
2907 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2908 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2911 /* Precalculate the valid memory formats as well as the vector information;
2912 this must be set up before the rs6000_hard_regno_nregs_internal calls
2913 below. */
2914 gcc_assert ((int)VECTOR_NONE == 0);
2915 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2916 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2918 gcc_assert ((int)CODE_FOR_nothing == 0);
2919 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2921 gcc_assert ((int)NO_REGS == 0);
2922 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2924 /* The VSX hardware allows native alignment for vectors, but we control whether
2925 the compiler believes it can use native alignment or still uses 128-bit alignment. */
2926 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2928 align64 = 64;
2929 align32 = 32;
2931 else
2933 align64 = 128;
2934 align32 = 128;
2937 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2938 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2939 if (TARGET_FLOAT128_TYPE)
2941 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2942 rs6000_vector_align[KFmode] = 128;
2944 if (FLOAT128_IEEE_P (TFmode))
2946 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2947 rs6000_vector_align[TFmode] = 128;
2951 /* V2DF mode, VSX only. */
2952 if (TARGET_VSX)
2954 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2955 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2956 rs6000_vector_align[V2DFmode] = align64;
2959 /* V4SF mode, either VSX or Altivec. */
2960 if (TARGET_VSX)
2962 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2963 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2964 rs6000_vector_align[V4SFmode] = align32;
2966 else if (TARGET_ALTIVEC)
2968 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2969 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2970 rs6000_vector_align[V4SFmode] = align32;
2973 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2974 and stores. */
2975 if (TARGET_ALTIVEC)
2977 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2978 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2979 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2980 rs6000_vector_align[V4SImode] = align32;
2981 rs6000_vector_align[V8HImode] = align32;
2982 rs6000_vector_align[V16QImode] = align32;
2984 if (TARGET_VSX)
2986 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2987 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2988 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2990 else
2992 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2993 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2994 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2998 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
2999 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3000 if (TARGET_VSX)
3002 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3003 rs6000_vector_unit[V2DImode]
3004 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3005 rs6000_vector_align[V2DImode] = align64;
3007 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3008 rs6000_vector_unit[V1TImode]
3009 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3010 rs6000_vector_align[V1TImode] = 128;
3013 /* DFmode, see if we want to use the VSX unit. Memory is handled
3014 differently, so don't set rs6000_vector_mem. */
3015 if (TARGET_VSX)
3017 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3018 rs6000_vector_align[DFmode] = 64;
3021 /* SFmode, see if we want to use the VSX unit. */
3022 if (TARGET_P8_VECTOR)
3024 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3025 rs6000_vector_align[SFmode] = 32;
3028 /* Allow TImode in VSX registers and set the VSX memory macros. */
3029 if (TARGET_VSX)
3031 rs6000_vector_mem[TImode] = VECTOR_VSX;
3032 rs6000_vector_align[TImode] = align64;
3035 /* Register class constraints for the constraints that depend on compile
3036 switches. When the VSX code was added, different constraints were added
3037 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3038 of the VSX registers are used. The register classes for scalar floating
3039 point types are set, based on whether we allow that type into the upper
3040 (Altivec) registers. GCC has register classes to target the Altivec
3041 registers for load/store operations, to select using a VSX memory
3042 operation instead of the traditional floating point operation. The
3043 constraints are:
3045 d - Register class to use with traditional DFmode instructions.
3046 f - Register class to use with traditional SFmode instructions.
3047 v - Altivec register.
3048 wa - Any VSX register.
3049 wc - Reserved to represent individual CR bits (used in LLVM).
3050 wn - always NO_REGS.
3051 wr - GPR if 64-bit mode is permitted.
3052 wx - Float register if we can do 32-bit int stores. */
3054 if (TARGET_HARD_FLOAT)
3056 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3057 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3060 if (TARGET_VSX)
3061 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3063 /* Add conditional constraints based on various options, to allow us to
3064 collapse multiple insn patterns. */
3065 if (TARGET_ALTIVEC)
3066 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3068 if (TARGET_POWERPC64)
3070 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3071 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3074 if (TARGET_STFIWX)
3075 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3077 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3078 if (TARGET_DIRECT_MOVE_128)
3079 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3081 /* Set up the reload helper and direct move functions. */
3082 if (TARGET_VSX || TARGET_ALTIVEC)
3084 if (TARGET_64BIT)
3086 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3087 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3088 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3089 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3090 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3091 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3092 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3093 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3094 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3095 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3096 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3097 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3098 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3099 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3100 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3101 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3102 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3103 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3104 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3105 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3107 if (FLOAT128_VECTOR_P (KFmode))
3109 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3110 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3113 if (FLOAT128_VECTOR_P (TFmode))
3115 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3116 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3119 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3120 available. */
3121 if (TARGET_NO_SDMODE_STACK)
3123 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3124 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3127 if (TARGET_VSX)
3129 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3130 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3133 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3135 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3136 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3137 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3138 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3139 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3140 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3141 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3142 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3143 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3145 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3146 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3147 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3148 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3149 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3150 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3151 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3152 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3153 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3155 if (FLOAT128_VECTOR_P (KFmode))
3157 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3158 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3161 if (FLOAT128_VECTOR_P (TFmode))
3163 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3164 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3168 else
3170 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3171 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3172 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3173 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3174 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3175 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3176 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3177 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3178 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3179 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3180 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3181 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3182 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3183 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3184 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3185 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3186 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3187 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3188 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3189 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3191 if (FLOAT128_VECTOR_P (KFmode))
3193 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3194 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3197 if (FLOAT128_IEEE_P (TFmode))
3199 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3200 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3203 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3204 available. */
3205 if (TARGET_NO_SDMODE_STACK)
3207 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3208 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3211 if (TARGET_VSX)
3213 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3214 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3217 if (TARGET_DIRECT_MOVE)
3219 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3220 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3221 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3225 reg_addr[DFmode].scalar_in_vmx_p = true;
3226 reg_addr[DImode].scalar_in_vmx_p = true;
3228 if (TARGET_P8_VECTOR)
3230 reg_addr[SFmode].scalar_in_vmx_p = true;
3231 reg_addr[SImode].scalar_in_vmx_p = true;
3233 if (TARGET_P9_VECTOR)
3235 reg_addr[HImode].scalar_in_vmx_p = true;
3236 reg_addr[QImode].scalar_in_vmx_p = true;
3241 /* Precalculate HARD_REGNO_NREGS. */
3242 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3243 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3244 rs6000_hard_regno_nregs[m][r]
3245 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3247 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3248 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3249 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3250 rs6000_hard_regno_mode_ok_p[m][r]
3251 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3253 /* Precalculate CLASS_MAX_NREGS sizes. */
3254 for (c = 0; c < LIM_REG_CLASSES; ++c)
3256 int reg_size;
3258 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3259 reg_size = UNITS_PER_VSX_WORD;
3261 else if (c == ALTIVEC_REGS)
3262 reg_size = UNITS_PER_ALTIVEC_WORD;
3264 else if (c == FLOAT_REGS)
3265 reg_size = UNITS_PER_FP_WORD;
3267 else
3268 reg_size = UNITS_PER_WORD;
3270 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3272 machine_mode m2 = (machine_mode)m;
3273 int reg_size2 = reg_size;
3275 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3276 in VSX. */
3277 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3278 reg_size2 = UNITS_PER_FP_WORD;
3280 rs6000_class_max_nregs[m][c]
3281 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
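/* Worked example: IFmode (IBM 128-bit floating point, which is
   FLOAT128_2REG_P) in a VSX register class is forced to reg_size2 ==
   UNITS_PER_FP_WORD == 8, so it needs (16 + 8 - 1) / 8 == 2 registers,
   whereas V2DFmode uses the full 16-byte VSX word and needs
   (16 + 16 - 1) / 16 == 1.  */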
3285 /* Calculate the modes for which we automatically generate code to use the
3286 reciprocal divide and square root instructions. In the future, possibly
3287 automatically generate the instructions even if the user did not specify
3288 -mrecip. The double-precision reciprocal square root estimate on the older
3289 machines is not accurate enough. */
3290 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3291 if (TARGET_FRES)
3292 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3293 if (TARGET_FRE)
3294 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3295 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3296 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3297 if (VECTOR_UNIT_VSX_P (V2DFmode))
3298 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3300 if (TARGET_FRSQRTES)
3301 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3302 if (TARGET_FRSQRTE)
3303 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3304 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3305 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3306 if (VECTOR_UNIT_VSX_P (V2DFmode))
3307 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3309 if (rs6000_recip_control)
3311 if (!flag_finite_math_only)
3312 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3313 "-ffast-math");
3314 if (flag_trapping_math)
3315 warning (0, "%qs requires %qs or %qs", "-mrecip",
3316 "-fno-trapping-math", "-ffast-math");
3317 if (!flag_reciprocal_math)
3318 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3319 "-ffast-math");
3320 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3322 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3323 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3324 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3326 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3327 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3328 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3330 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3331 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3332 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3334 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3335 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3336 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3338 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3339 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3340 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3342 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3343 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3344 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3346 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3347 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3348 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3350 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3351 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3352 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3356 /* Update the addr mask bits in reg_addr to help secondary reload and the
3357 legitimate address support figure out the appropriate addressing to
3358 use. */
3359 rs6000_setup_reg_addr_masks ();
3361 if (global_init_p || TARGET_DEBUG_TARGET)
3363 if (TARGET_DEBUG_REG)
3364 rs6000_debug_reg_global ();
3366 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3367 fprintf (stderr,
3368 "SImode variable mult cost = %d\n"
3369 "SImode constant mult cost = %d\n"
3370 "SImode short constant mult cost = %d\n"
3371 "DImode multipliciation cost = %d\n"
3372 "SImode division cost = %d\n"
3373 "DImode division cost = %d\n"
3374 "Simple fp operation cost = %d\n"
3375 "DFmode multiplication cost = %d\n"
3376 "SFmode division cost = %d\n"
3377 "DFmode division cost = %d\n"
3378 "cache line size = %d\n"
3379 "l1 cache size = %d\n"
3380 "l2 cache size = %d\n"
3381 "simultaneous prefetches = %d\n"
3382 "\n",
3383 rs6000_cost->mulsi,
3384 rs6000_cost->mulsi_const,
3385 rs6000_cost->mulsi_const9,
3386 rs6000_cost->muldi,
3387 rs6000_cost->divsi,
3388 rs6000_cost->divdi,
3389 rs6000_cost->fp,
3390 rs6000_cost->dmul,
3391 rs6000_cost->sdiv,
3392 rs6000_cost->ddiv,
3393 rs6000_cost->cache_line_size,
3394 rs6000_cost->l1_cache_size,
3395 rs6000_cost->l2_cache_size,
3396 rs6000_cost->simultaneous_prefetches);
3400 #if TARGET_MACHO
3401 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3403 static void
3404 darwin_rs6000_override_options (void)
3406 /* The Darwin ABI always includes AltiVec; it cannot (validly) be turned
3407 off. */
3408 rs6000_altivec_abi = 1;
3409 TARGET_ALTIVEC_VRSAVE = 1;
3410 rs6000_current_abi = ABI_DARWIN;
3412 if (DEFAULT_ABI == ABI_DARWIN
3413 && TARGET_64BIT)
3414 darwin_one_byte_bool = 1;
3416 if (TARGET_64BIT && ! TARGET_POWERPC64)
3418 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3419 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3422 /* The linkers [ld64] that support 64-bit do not need the JBSR longcall
3423 optimisation, and will not work with the most generic case (where the
3424 symbol is undefined external, but there is no symbol stub). */
3425 if (TARGET_64BIT)
3426 rs6000_default_long_calls = 0;
3428 /* ld_classic is (so far) still used for kernel (static) code, and supports
3429 the JBSR longcall / branch islands. */
3430 if (flag_mkernel)
3432 rs6000_default_long_calls = 1;
3434 /* Allow a kext author to do -mkernel -mhard-float. */
3435 if (! (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT))
3436 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3439 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3440 AltiVec. */
3441 if (!flag_mkernel && !flag_apple_kext
3442 && TARGET_64BIT
3443 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3444 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3446 /* Unless the user (not the configurer) has explicitly overridden
3447 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3448 G4 unless targeting the kernel. */
3449 if (!flag_mkernel
3450 && !flag_apple_kext
3451 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3452 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3453 && ! global_options_set.x_rs6000_cpu_index)
3455 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3458 #endif
3460 /* If not otherwise specified by a target, make 'long double' equivalent to
3461 'double'. */
3463 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3464 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3465 #endif
3467 /* Return the builtin mask reflecting the target options in use, which
3468 determines which builtins are enabled. In the past we used target_flags,
3469 but we've run out of bits, and some options are no longer in target_flags. */
3471 HOST_WIDE_INT
3472 rs6000_builtin_mask_calculate (void)
3474 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3475 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3476 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3477 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3478 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3479 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3480 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3481 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3482 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3483 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3484 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3485 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3486 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3487 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3488 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3489 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3490 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3491 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3492 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3493 | ((TARGET_LONG_DOUBLE_128
3494 && TARGET_HARD_FLOAT
3495 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3496 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3497 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
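/* For example (illustrative): compiling with -maltivec -mvsx would yield a
   mask containing at least RS6000_BTM_ALTIVEC | RS6000_BTM_VSX, which later
   gates the expansion of the corresponding built-in functions.  */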
3500 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3501 to clobber the XER[CA] bit because clobbering that bit without telling
3502 the compiler worked just fine with versions of GCC before GCC 5, and
3503 breaking a lot of older code in ways that are hard to track down is
3504 not such a great idea. */
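/* A hypothetical example of the problem: an asm such as
     asm ("addic %0,%1,1" : "=r" (x) : "r" (y));
   updates XER[CA] without listing it, so we add the clobber on the
   asm's behalf.  */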
3506 static rtx_insn *
3507 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3508 vec<const char *> &/*constraints*/,
3509 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3511 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3512 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3513 return NULL;
3516 /* Override command line options.
3518 Combine build-specific configuration information with options
3519 specified on the command line to set various state variables which
3520 influence code generation, optimization, and expansion of built-in
3521 functions. Assure that command-line configuration preferences are
3522 compatible with each other and with the build configuration; issue
3523 warnings while adjusting configuration or error messages while
3524 rejecting configuration.
3526 Upon entry to this function:
3528 This function is called once at the beginning of
3529 compilation, and then again at the start and end of compiling
3530 each section of code that has a different configuration, as
3531 indicated, for example, by adding the
3533 __attribute__((__target__("cpu=power9")))
3535 qualifier to a function definition or, for example, by bracketing
3536 code between
3538 #pragma GCC target("altivec")
3542 #pragma GCC reset_options
3544 directives. Parameter global_init_p is true for the initial
3545 invocation, which initializes global variables, and false for all
3546 subsequent invocations.
3549 Various global state information is assumed to be valid. This
3550 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3551 default CPU specified at build configure time, TARGET_DEFAULT,
3552 representing the default set of option flags for the default
3553 target, and global_options_set.x_rs6000_isa_flags, representing
3554 which options were requested on the command line.
3556 Upon return from this function:
3558 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3559 was set by name on the command line. Additionally, if certain
3560 attributes are automatically enabled or disabled by this function
3561 in order to assure compatibility between options and
3562 configuration, the flags associated with those attributes are
3563 also set. By setting these "explicit bits", we avoid the risk
3564 that other code might accidentally overwrite these particular
3565 attributes with "default values".
3567 The various bits of rs6000_isa_flags are set to indicate the
3568 target options that have been selected for the most current
3569 compilation efforts. This has the effect of also turning on the
3570 associated TARGET_XXX values since these are macros which are
3571 generally defined to test the corresponding bit of the
3572 rs6000_isa_flags variable.
3574 The variable rs6000_builtin_mask is set to represent the target
3575 options for the most current compilation efforts, consistent with
3576 the current contents of rs6000_isa_flags. This variable controls
3577 expansion of built-in functions.
3579 Various other global variables and fields of global structures
3580 (over 50 in all) are initialized to reflect the desired options
3581 for the most current compilation efforts. */
3583 static bool
3584 rs6000_option_override_internal (bool global_init_p)
3586 bool ret = true;
3588 HOST_WIDE_INT set_masks;
3589 HOST_WIDE_INT ignore_masks;
3590 int cpu_index = -1;
3591 int tune_index;
3592 struct cl_target_option *main_target_opt
3593 = ((global_init_p || target_option_default_node == NULL)
3594 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3596 /* Print defaults. */
3597 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3598 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3600 /* Remember the explicit arguments. */
3601 if (global_init_p)
3602 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3604 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3605 library functions, so warn about it. The flag may be useful for
3606 performance studies from time to time though, so don't disable it
3607 entirely. */
3608 if (global_options_set.x_rs6000_alignment_flags
3609 && rs6000_alignment_flags == MASK_ALIGN_POWER
3610 && DEFAULT_ABI == ABI_DARWIN
3611 && TARGET_64BIT)
3612 warning (0, "%qs is not supported for 64-bit Darwin;"
3613 " it is incompatible with the installed C and C++ libraries",
3614 "-malign-power");
3616 /* Numerous experiments show that IRA-based loop pressure
3617 calculation works better for RTL loop invariant motion on targets
3618 with enough (>= 32) registers. It is an expensive optimization.
3619 So it is on only for peak performance. */
3620 if (optimize >= 3 && global_init_p
3621 && !global_options_set.x_flag_ira_loop_pressure)
3622 flag_ira_loop_pressure = 1;
3624 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3625 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3626 options were already specified. */
3627 if (flag_sanitize & SANITIZE_USER_ADDRESS
3628 && !global_options_set.x_flag_asynchronous_unwind_tables)
3629 flag_asynchronous_unwind_tables = 1;
3631 /* -fvariable-expansion-in-unroller is a win for POWER whenever the
3632 loop unroller is active. It is only checked during unrolling, so
3633 we can just set it on by default. */
3634 if (!global_options_set.x_flag_variable_expansion_in_unroller)
3635 flag_variable_expansion_in_unroller = 1;
3637 /* Set the pointer size. */
3638 if (TARGET_64BIT)
3640 rs6000_pmode = DImode;
3641 rs6000_pointer_size = 64;
3643 else
3645 rs6000_pmode = SImode;
3646 rs6000_pointer_size = 32;
3649 /* Some OSs don't support saving the high part of 64-bit registers on context
3650 switch. Other OSs don't support saving Altivec registers. On those OSs,
3651 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3652 if the user wants either, the user must explicitly specify them and we
3653 won't interfere with the user's specification. */
3655 set_masks = POWERPC_MASKS;
3656 #ifdef OS_MISSING_POWERPC64
3657 if (OS_MISSING_POWERPC64)
3658 set_masks &= ~OPTION_MASK_POWERPC64;
3659 #endif
3660 #ifdef OS_MISSING_ALTIVEC
3661 if (OS_MISSING_ALTIVEC)
3662 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3663 | OTHER_VSX_VECTOR_MASKS);
3664 #endif
3666 /* Don't let the processor default override options given explicitly. */
3667 set_masks &= ~rs6000_isa_flags_explicit;
3669 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3670 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3672 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3673 the cpu in a target attribute or pragma, but did not specify a tuning
3674 option, use the cpu for the tuning option rather than the option specified
3675 with -mtune on the command line. Process a '--with-cpu' configuration
3676 request as an implicit -mcpu. */
3677 if (rs6000_cpu_index >= 0)
3678 cpu_index = rs6000_cpu_index;
3679 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3680 cpu_index = main_target_opt->x_rs6000_cpu_index;
3681 else if (OPTION_TARGET_CPU_DEFAULT)
3682 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3684 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3685 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3686 with those from the cpu, except for options that were explicitly set. If
3687 we don't have a cpu, do not override the target bits set in
3688 TARGET_DEFAULT. */
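/* For example (illustrative): with -mcpu=power8 -mno-altivec, the explicit
   -mno-altivec bit was removed from set_masks above, so the power8 entry in
   processor_target_table cannot re-enable AltiVec here.  */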
3689 if (cpu_index >= 0)
3691 rs6000_cpu_index = cpu_index;
3692 rs6000_isa_flags &= ~set_masks;
3693 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3694 & set_masks);
3696 else
3698 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3699 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3700 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3701 to using rs6000_isa_flags, we need to do the initialization here.
3703 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3704 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3705 HOST_WIDE_INT flags;
3706 if (TARGET_DEFAULT)
3707 flags = TARGET_DEFAULT;
3708 else
3710 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3711 const char *default_cpu = (!TARGET_POWERPC64
3712 ? "powerpc"
3713 : (BYTES_BIG_ENDIAN
3714 ? "powerpc64"
3715 : "powerpc64le"));
3716 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3717 flags = processor_target_table[default_cpu_index].target_enable;
3719 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3722 if (rs6000_tune_index >= 0)
3723 tune_index = rs6000_tune_index;
3724 else if (cpu_index >= 0)
3725 rs6000_tune_index = tune_index = cpu_index;
3726 else
3728 size_t i;
3729 enum processor_type tune_proc
3730 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3732 tune_index = -1;
3733 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3734 if (processor_target_table[i].processor == tune_proc)
3736 tune_index = i;
3737 break;
3741 if (cpu_index >= 0)
3742 rs6000_cpu = processor_target_table[cpu_index].processor;
3743 else
3744 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3746 gcc_assert (tune_index >= 0);
3747 rs6000_tune = processor_target_table[tune_index].processor;
3749 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3750 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3751 || rs6000_cpu == PROCESSOR_PPCE5500)
3753 if (TARGET_ALTIVEC)
3754 error ("AltiVec not supported in this target");
3757 /* If we are optimizing big endian systems for space, use the load/store
3758 multiple instructions. */
3759 if (BYTES_BIG_ENDIAN && optimize_size)
3760 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3762 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3763 because the hardware doesn't support the instructions used in little
3764 endian mode, and they cause an alignment trap. The 750 does not cause an
3765 alignment trap (except when the target is unaligned). */
3767 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3769 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3770 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3771 warning (0, "%qs is not supported on little endian systems",
3772 "-mmultiple");
3775 /* If little-endian, default to -mstrict-align on older processors.
3776 Testing for htm matches power8 and later. */
3777 if (!BYTES_BIG_ENDIAN
3778 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3779 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3781 if (!rs6000_fold_gimple)
3782 fprintf (stderr,
3783 "gimple folding of rs6000 builtins has been disabled.\n");
3785 /* Add some warnings for VSX. */
3786 if (TARGET_VSX)
3788 const char *msg = NULL;
3789 if (!TARGET_HARD_FLOAT)
3791 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3792 msg = N_("%<-mvsx%> requires hardware floating point");
3793 else
3795 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3796 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3799 else if (TARGET_AVOID_XFORM > 0)
3800 msg = N_("%<-mvsx%> needs indexed addressing");
3801 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3802 & OPTION_MASK_ALTIVEC))
3804 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3805 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
3806 else
3807 msg = N_("%<-mno-altivec%> disables vsx");
3810 if (msg)
3812 warning (0, msg);
3813 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3814 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3818 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3819 the -mcpu setting to enable options that conflict. */
3820 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3821 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3822 | OPTION_MASK_ALTIVEC
3823 | OPTION_MASK_VSX)) != 0)
3824 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3825 | OPTION_MASK_DIRECT_MOVE)
3826 & ~rs6000_isa_flags_explicit);
3828 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3829 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3831 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
3832 off all of the options that depend on those flags. */
3833 ignore_masks = rs6000_disable_incompatible_switches ();
3835 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3836 unless the user explicitly used the -mno-<option> to disable the code. */
3837 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
3838 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3839 else if (TARGET_P9_MINMAX)
3841 if (cpu_index >= 0)
3843 if (cpu_index == PROCESSOR_POWER9)
3845 /* Legacy behavior: allow -mcpu=power9 with certain
3846 capabilities explicitly disabled. */
3847 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3849 else
3850 error ("power9 target option is incompatible with %<%s=<xxx>%> "
3851 "for <xxx> less than power9", "-mcpu");
3853 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
3854 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
3855 & rs6000_isa_flags_explicit))
3856 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
3857 were explicitly cleared. */
3858 error ("%qs incompatible with explicitly disabled options",
3859 "-mpower9-minmax");
3860 else
3861 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
3863 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3864 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
3865 else if (TARGET_VSX)
3866 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
3867 else if (TARGET_POPCNTD)
3868 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
3869 else if (TARGET_DFP)
3870 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
3871 else if (TARGET_CMPB)
3872 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
3873 else if (TARGET_FPRND)
3874 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
3875 else if (TARGET_POPCNTB)
3876 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
3877 else if (TARGET_ALTIVEC)
3878 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
3880 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3882 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3883 error ("%qs requires %qs", "-mcrypto", "-maltivec");
3884 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3887 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3889 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3890 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
3891 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3894 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3896 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3897 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
3898 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3901 if (TARGET_P8_VECTOR && !TARGET_VSX)
3903 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3904 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
3905 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
3906 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
3908 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3909 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3910 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
3912 else
3914 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
3915 not explicit. */
3916 rs6000_isa_flags |= OPTION_MASK_VSX;
3917 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3921 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3923 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3924 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
3925 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3928 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3929 silently turn off quad memory mode. */
3930 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3932 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3933 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
3935 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3936 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
3938 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3939 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3942 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3943 the words are reversed, but atomic operations can still be done by
3944 swapping the words. */
3945 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3947 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3948 warning (0, N_("%<-mquad-memory%> is not available in little endian "
3949 "mode"));
3951 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3954 /* Assume if the user asked for normal quad memory instructions, they want
3955 the atomic versions as well, unless they explicitly told us not to use quad
3956 word atomic instructions. */
3957 if (TARGET_QUAD_MEMORY
3958 && !TARGET_QUAD_MEMORY_ATOMIC
3959 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3960 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3962 /* If we can shrink-wrap the TOC register save separately, then use
3963 -msave-toc-indirect unless explicitly disabled. */
3964 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
3965 && flag_shrink_wrap_separate
3966 && optimize_function_for_speed_p (cfun))
3967 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
3969 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3970 generating power8 instructions. Power9 does not optimize power8 fusion
3971 cases. */
3972 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3974 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
3975 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
3976 else
3977 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
3980 /* Setting additional fusion flags turns on base fusion. */
3981 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
3983 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
3985 if (TARGET_P8_FUSION_SIGN)
3986 error ("%qs requires %qs", "-mpower8-fusion-sign",
3987 "-mpower8-fusion");
3989 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
3991 else
3992 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
3995 /* Power8 does not fuse sign extended loads with the addis. If we are
3996 optimizing at high levels for speed, convert a sign extended load into a
3997 zero extending load, and an explicit sign extension. */
3998 if (TARGET_P8_FUSION
3999 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4000 && optimize_function_for_speed_p (cfun)
4001 && optimize >= 3)
4002 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4004 /* ISA 3.0 vector instructions include ISA 2.07. */
4005 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4007 /* We prefer to not mention undocumented options in
4008 error messages. However, if users have managed to select
4009 power9-vector without selecting power8-vector, they
4010 already know about undocumented flags. */
4011 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4012 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4013 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4014 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4016 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4017 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4018 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4020 else
4022 /* OPTION_MASK_P9_VECTOR is explicit and
4023 OPTION_MASK_P8_VECTOR is not explicit. */
4024 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4025 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4029 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4030 support. If we only have ISA 2.06 support, and the user did not specify
4031 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4032 but we don't enable the full vectorization support. */
4033 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4034 TARGET_ALLOW_MOVMISALIGN = 1;
4036 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4038 if (TARGET_ALLOW_MOVMISALIGN > 0
4039 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4040 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4042 TARGET_ALLOW_MOVMISALIGN = 0;
4045 /* Determine when unaligned vector accesses are permitted, and when
4046 they are preferred over masked Altivec loads. Note that if
4047 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4048 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4049 not true. */
4050 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4052 if (!TARGET_VSX)
4054 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4055 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4057 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4060 else if (!TARGET_ALLOW_MOVMISALIGN)
4062 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4063 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4064 "-mallow-movmisalign");
4066 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4070 /* Use long double size to select the appropriate long double. We use
4071 TYPE_PRECISION to differentiate the 3 different long double types. We map
4072 128 into the precision used for TFmode. */
4073 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4074 ? 64
4075 : FLOAT_PRECISION_TFmode);
4077 /* Set long double size before the IEEE 128-bit tests. */
4078 if (!global_options_set.x_rs6000_long_double_type_size)
4080 if (main_target_opt != NULL
4081 && (main_target_opt->x_rs6000_long_double_type_size
4082 != default_long_double_size))
4083 error ("target attribute or pragma changes %<long double%> size");
4084 else
4085 rs6000_long_double_type_size = default_long_double_size;
4087 else if (rs6000_long_double_type_size == 128)
4088 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4089 else if (global_options_set.x_rs6000_ieeequad)
4091 if (global_options.x_rs6000_ieeequad)
4092 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4093 else
4094 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4097 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4098 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4099 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4100 those systems will not pick up this default. Warn if the user changes the
4101 default unless -Wno-psabi. */
4102 if (!global_options_set.x_rs6000_ieeequad)
4103 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4105 else
4107 if (global_options.x_rs6000_ieeequad
4108 && (!TARGET_POPCNTD || !TARGET_VSX))
4109 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4111 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4113 static bool warned_change_long_double;
4114 if (!warned_change_long_double)
4116 warned_change_long_double = true;
4117 if (TARGET_IEEEQUAD)
4118 warning (OPT_Wpsabi, "Using IEEE extended precision "
4119 "%<long double%>");
4120 else
4121 warning (OPT_Wpsabi, "Using IBM extended precision "
4122 "%<long double%>");
4127 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4128 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4129 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4130 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4131 the keyword as well as the type. */
4132 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4134 /* IEEE 128-bit floating point requires VSX support. */
4135 if (TARGET_FLOAT128_KEYWORD)
4137 if (!TARGET_VSX)
4139 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4140 error ("%qs requires VSX support", "%<-mfloat128%>");
4142 TARGET_FLOAT128_TYPE = 0;
4143 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4144 | OPTION_MASK_FLOAT128_HW);
4146 else if (!TARGET_FLOAT128_TYPE)
4148 TARGET_FLOAT128_TYPE = 1;
4149 warning (0, "The %<-mfloat128%> option may not be fully supported");
4153 /* Enable the __float128 keyword under Linux by default. */
4154 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4155 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4156 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4158 /* If we are supporting the float128 type and have full ISA 3.0 support,
4159 enable -mfloat128-hardware by default. However, don't enable the
4160 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4161 because sometimes the compiler wants to put things in an integer
4162 container, and if we don't have __int128 support, it is impossible. */
4163 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4164 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4165 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4166 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4168 if (TARGET_FLOAT128_HW
4169 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4171 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4172 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4174 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4177 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4179 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4180 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4182 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4185 /* -mprefixed-addr (and hence -mpcrel) requires -mcpu=future. */
4186 if (TARGET_PREFIXED_ADDR && !TARGET_FUTURE)
4188 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4189 error ("%qs requires %qs", "-mpcrel", "-mcpu=future");
4190 else if ((rs6000_isa_flags_explicit & OPTION_MASK_PREFIXED_ADDR) != 0)
4191 error ("%qs requires %qs", "-mprefixed-addr", "-mcpu=future");
4193 rs6000_isa_flags &= ~(OPTION_MASK_PCREL | OPTION_MASK_PREFIXED_ADDR);
4196 /* -mpcrel requires prefixed load/store addressing. */
4197 if (TARGET_PCREL && !TARGET_PREFIXED_ADDR)
4199 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4200 error ("%qs requires %qs", "-mpcrel", "-mprefixed-addr");
4202 rs6000_isa_flags &= ~OPTION_MASK_PCREL;
4205 /* Print the options after updating the defaults. */
4206 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4207 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4209 /* E500mc does "better" if we inline more aggressively. Respect the
4210 user's opinion, though. */
4211 if (rs6000_block_move_inline_limit == 0
4212 && (rs6000_tune == PROCESSOR_PPCE500MC
4213 || rs6000_tune == PROCESSOR_PPCE500MC64
4214 || rs6000_tune == PROCESSOR_PPCE5500
4215 || rs6000_tune == PROCESSOR_PPCE6500))
4216 rs6000_block_move_inline_limit = 128;
4218 /* store_one_arg depends on expand_block_move to handle at least the
4219 size of reg_parm_stack_space. */
4220 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4221 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4223 if (global_init_p)
4225 /* If the appropriate debug option is enabled, replace the target hooks
4226 with debug versions that call the real version and then print
4227 debugging information. */
4228 if (TARGET_DEBUG_COST)
4230 targetm.rtx_costs = rs6000_debug_rtx_costs;
4231 targetm.address_cost = rs6000_debug_address_cost;
4232 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4235 if (TARGET_DEBUG_ADDR)
4237 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4238 targetm.legitimize_address = rs6000_debug_legitimize_address;
4239 rs6000_secondary_reload_class_ptr
4240 = rs6000_debug_secondary_reload_class;
4241 targetm.secondary_memory_needed
4242 = rs6000_debug_secondary_memory_needed;
4243 targetm.can_change_mode_class
4244 = rs6000_debug_can_change_mode_class;
4245 rs6000_preferred_reload_class_ptr
4246 = rs6000_debug_preferred_reload_class;
4247 rs6000_mode_dependent_address_ptr
4248 = rs6000_debug_mode_dependent_address;
4251 if (rs6000_veclibabi_name)
4253 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4254 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4255 else
4257 error ("unknown vectorization library ABI type (%qs) for "
4258 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4259 ret = false;
4264 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4265 target attribute or pragma which automatically enables both options,
4266 unless the altivec ABI was set. This is set by default for 64-bit, but
4267 not for 32-bit. */
4268 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4270 TARGET_FLOAT128_TYPE = 0;
4271 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4272 | OPTION_MASK_FLOAT128_KEYWORD)
4273 & ~rs6000_isa_flags_explicit);
4276 /* Enable Altivec ABI for AIX -maltivec. */
4277 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4279 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4280 error ("target attribute or pragma changes AltiVec ABI");
4281 else
4282 rs6000_altivec_abi = 1;
4285 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4286 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4287 be explicitly overridden in either case. */
4288 if (TARGET_ELF)
4290 if (!global_options_set.x_rs6000_altivec_abi
4291 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4293 if (main_target_opt != NULL
4294 && !main_target_opt->x_rs6000_altivec_abi)
4295 error ("target attribute or pragma changes AltiVec ABI");
4296 else
4297 rs6000_altivec_abi = 1;
4301 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4302 So far, the only darwin64 targets are also MACH-O. */
4303 if (TARGET_MACHO
4304 && DEFAULT_ABI == ABI_DARWIN
4305 && TARGET_64BIT)
4307 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4308 error ("target attribute or pragma changes darwin64 ABI");
4309 else
4311 rs6000_darwin64_abi = 1;
4312 /* Default to natural alignment, for better performance. */
4313 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4317 /* Place FP constants in the constant pool instead of TOC
4318 if section anchors are enabled. */
4319 if (flag_section_anchors
4320 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4321 TARGET_NO_FP_IN_TOC = 1;
4323 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4324 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4326 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4327 SUBTARGET_OVERRIDE_OPTIONS;
4328 #endif
4329 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4330 SUBSUBTARGET_OVERRIDE_OPTIONS;
4331 #endif
4332 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4333 SUB3TARGET_OVERRIDE_OPTIONS;
4334 #endif
4336 /* -mpcrel requires -mcmodel=medium, but we can't check TARGET_CMODEL until
4337 after the subtarget override options are done. */
4338 if (TARGET_PCREL && TARGET_CMODEL != CMODEL_MEDIUM)
4340 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4341 error ("%qs requires %qs", "-mpcrel", "-mcmodel=medium");
4343 rs6000_isa_flags &= ~OPTION_MASK_PCREL;
4346 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4347 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4349 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4350 && rs6000_tune != PROCESSOR_POWER5
4351 && rs6000_tune != PROCESSOR_POWER6
4352 && rs6000_tune != PROCESSOR_POWER7
4353 && rs6000_tune != PROCESSOR_POWER8
4354 && rs6000_tune != PROCESSOR_POWER9
4355 && rs6000_tune != PROCESSOR_FUTURE
4356 && rs6000_tune != PROCESSOR_PPCA2
4357 && rs6000_tune != PROCESSOR_CELL
4358 && rs6000_tune != PROCESSOR_PPC476);
4359 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4360 || rs6000_tune == PROCESSOR_POWER5
4361 || rs6000_tune == PROCESSOR_POWER7
4362 || rs6000_tune == PROCESSOR_POWER8);
4363 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4364 || rs6000_tune == PROCESSOR_POWER5
4365 || rs6000_tune == PROCESSOR_POWER6
4366 || rs6000_tune == PROCESSOR_POWER7
4367 || rs6000_tune == PROCESSOR_POWER8
4368 || rs6000_tune == PROCESSOR_POWER9
4369 || rs6000_tune == PROCESSOR_FUTURE
4370 || rs6000_tune == PROCESSOR_PPCE500MC
4371 || rs6000_tune == PROCESSOR_PPCE500MC64
4372 || rs6000_tune == PROCESSOR_PPCE5500
4373 || rs6000_tune == PROCESSOR_PPCE6500);
4375 /* Allow debug switches to override the above settings. These are set to -1
4376 in rs6000.opt to indicate the user hasn't directly set the switch. */
4377 if (TARGET_ALWAYS_HINT >= 0)
4378 rs6000_always_hint = TARGET_ALWAYS_HINT;
4380 if (TARGET_SCHED_GROUPS >= 0)
4381 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4383 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4384 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4386 rs6000_sched_restricted_insns_priority
4387 = (rs6000_sched_groups ? 1 : 0);
4389 /* Handle -msched-costly-dep option. */
4390 rs6000_sched_costly_dep
4391 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4393 if (rs6000_sched_costly_dep_str)
4395 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4396 rs6000_sched_costly_dep = no_dep_costly;
4397 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4398 rs6000_sched_costly_dep = all_deps_costly;
4399 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4400 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4401 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4402 rs6000_sched_costly_dep = store_to_load_dep_costly;
4403 else
4404 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4405 atoi (rs6000_sched_costly_dep_str));
4408 /* Handle -minsert-sched-nops option. */
4409 rs6000_sched_insert_nops
4410 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4412 if (rs6000_sched_insert_nops_str)
4414 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4415 rs6000_sched_insert_nops = sched_finish_none;
4416 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4417 rs6000_sched_insert_nops = sched_finish_pad_groups;
4418 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4419 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4420 else
4421 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4422 atoi (rs6000_sched_insert_nops_str));
4425 /* Handle stack protector. */
4426 if (!global_options_set.x_rs6000_stack_protector_guard)
4427 #ifdef TARGET_THREAD_SSP_OFFSET
4428 rs6000_stack_protector_guard = SSP_TLS;
4429 #else
4430 rs6000_stack_protector_guard = SSP_GLOBAL;
4431 #endif
4433 #ifdef TARGET_THREAD_SSP_OFFSET
4434 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4435 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4436 #endif
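/* A hypothetical invocation that satisfies the checks below:
     -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
     -mstack-protector-guard-offset=0x7010
   the offset must be in [-0x8000, 0x7fff] and, for -m64, word aligned.  */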
4438 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4440 char *endp;
4441 const char *str = rs6000_stack_protector_guard_offset_str;
4443 errno = 0;
4444 long offset = strtol (str, &endp, 0);
4445 if (!*str || *endp || errno)
4446 error ("%qs is not a valid number in %qs", str,
4447 "-mstack-protector-guard-offset=");
4449 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4450 || (TARGET_64BIT && (offset & 3)))
4451 error ("%qs is not a valid offset in %qs", str,
4452 "-mstack-protector-guard-offset=");
4454 rs6000_stack_protector_guard_offset = offset;
4457 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4459 const char *str = rs6000_stack_protector_guard_reg_str;
4460 int reg = decode_reg_name (str);
4462 if (!IN_RANGE (reg, 1, 31))
4463 error ("%qs is not a valid base register in %qs", str,
4464 "-mstack-protector-guard-reg=");
4466 rs6000_stack_protector_guard_reg = reg;
4469 if (rs6000_stack_protector_guard == SSP_TLS
4470 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4471 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4473 if (global_init_p)
4475 #ifdef TARGET_REGNAMES
4476 /* If the user desires alternate register names, copy in the
4477 alternate names now. */
4478 if (TARGET_REGNAMES)
4479 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4480 #endif
4482 /* Set aix_struct_return last, after the ABI is determined.
4483 If -maix-struct-return or -msvr4-struct-return was explicitly
4484 used, don't override with the ABI default. */
4485 if (!global_options_set.x_aix_struct_return)
4486 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4488 #if 0
4489 /* IBM XL compiler defaults to unsigned bitfields. */
4490 if (TARGET_XL_COMPAT)
4491 flag_signed_bitfields = 0;
4492 #endif
4494 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4495 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4497 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4499 /* We can only guarantee the availability of DI pseudo-ops when
4500 assembling for 64-bit targets. */
4501 if (!TARGET_64BIT)
4503 targetm.asm_out.aligned_op.di = NULL;
4504 targetm.asm_out.unaligned_op.di = NULL;
4508 /* Set branch target alignment, if not optimizing for size. */
4509 if (!optimize_size)
4511 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4512 8-byte aligned to avoid misprediction by the branch predictor. */
4513 if (rs6000_tune == PROCESSOR_TITAN
4514 || rs6000_tune == PROCESSOR_CELL)
4516 if (flag_align_functions && !str_align_functions)
4517 str_align_functions = "8";
4518 if (flag_align_jumps && !str_align_jumps)
4519 str_align_jumps = "8";
4520 if (flag_align_loops && !str_align_loops)
4521 str_align_loops = "8";
4523 if (rs6000_align_branch_targets)
4525 if (flag_align_functions && !str_align_functions)
4526 str_align_functions = "16";
4527 if (flag_align_jumps && !str_align_jumps)
4528 str_align_jumps = "16";
4529 if (flag_align_loops && !str_align_loops)
4531 can_override_loop_align = 1;
4532 str_align_loops = "16";
4536 if (flag_align_jumps && !str_align_jumps)
4537 str_align_jumps = "16";
4538 if (flag_align_loops && !str_align_loops)
4539 str_align_loops = "16";
4542 /* Arrange to save and restore machine status around nested functions. */
4543 init_machine_status = rs6000_init_machine_status;
4545 /* We should always be splitting complex arguments, but we can't break
4546 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4547 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4548 targetm.calls.split_complex_arg = NULL;
4550 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4551 if (DEFAULT_ABI == ABI_AIX)
4552 targetm.calls.custom_function_descriptors = 0;
4555 /* Initialize rs6000_cost with the appropriate target costs. */
4556 if (optimize_size)
4557 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4558 else
4559 switch (rs6000_tune)
4561 case PROCESSOR_RS64A:
4562 rs6000_cost = &rs64a_cost;
4563 break;
4565 case PROCESSOR_MPCCORE:
4566 rs6000_cost = &mpccore_cost;
4567 break;
4569 case PROCESSOR_PPC403:
4570 rs6000_cost = &ppc403_cost;
4571 break;
4573 case PROCESSOR_PPC405:
4574 rs6000_cost = &ppc405_cost;
4575 break;
4577 case PROCESSOR_PPC440:
4578 rs6000_cost = &ppc440_cost;
4579 break;
4581 case PROCESSOR_PPC476:
4582 rs6000_cost = &ppc476_cost;
4583 break;
4585 case PROCESSOR_PPC601:
4586 rs6000_cost = &ppc601_cost;
4587 break;
4589 case PROCESSOR_PPC603:
4590 rs6000_cost = &ppc603_cost;
4591 break;
4593 case PROCESSOR_PPC604:
4594 rs6000_cost = &ppc604_cost;
4595 break;
4597 case PROCESSOR_PPC604e:
4598 rs6000_cost = &ppc604e_cost;
4599 break;
4601 case PROCESSOR_PPC620:
4602 rs6000_cost = &ppc620_cost;
4603 break;
4605 case PROCESSOR_PPC630:
4606 rs6000_cost = &ppc630_cost;
4607 break;
4609 case PROCESSOR_CELL:
4610 rs6000_cost = &ppccell_cost;
4611 break;
4613 case PROCESSOR_PPC750:
4614 case PROCESSOR_PPC7400:
4615 rs6000_cost = &ppc750_cost;
4616 break;
4618 case PROCESSOR_PPC7450:
4619 rs6000_cost = &ppc7450_cost;
4620 break;
4622 case PROCESSOR_PPC8540:
4623 case PROCESSOR_PPC8548:
4624 rs6000_cost = &ppc8540_cost;
4625 break;
4627 case PROCESSOR_PPCE300C2:
4628 case PROCESSOR_PPCE300C3:
4629 rs6000_cost = &ppce300c2c3_cost;
4630 break;
4632 case PROCESSOR_PPCE500MC:
4633 rs6000_cost = &ppce500mc_cost;
4634 break;
4636 case PROCESSOR_PPCE500MC64:
4637 rs6000_cost = &ppce500mc64_cost;
4638 break;
4640 case PROCESSOR_PPCE5500:
4641 rs6000_cost = &ppce5500_cost;
4642 break;
4644 case PROCESSOR_PPCE6500:
4645 rs6000_cost = &ppce6500_cost;
4646 break;
4648 case PROCESSOR_TITAN:
4649 rs6000_cost = &titan_cost;
4650 break;
4652 case PROCESSOR_POWER4:
4653 case PROCESSOR_POWER5:
4654 rs6000_cost = &power4_cost;
4655 break;
4657 case PROCESSOR_POWER6:
4658 rs6000_cost = &power6_cost;
4659 break;
4661 case PROCESSOR_POWER7:
4662 rs6000_cost = &power7_cost;
4663 break;
4665 case PROCESSOR_POWER8:
4666 rs6000_cost = &power8_cost;
4667 break;
4669 case PROCESSOR_POWER9:
4670 case PROCESSOR_FUTURE:
4671 rs6000_cost = &power9_cost;
4672 break;
4674 case PROCESSOR_PPCA2:
4675 rs6000_cost = &ppca2_cost;
4676 break;
4678 default:
4679 gcc_unreachable ();
4682 if (global_init_p)
4684 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4685 rs6000_cost->simultaneous_prefetches,
4686 global_options.x_param_values,
4687 global_options_set.x_param_values);
4688 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4689 global_options.x_param_values,
4690 global_options_set.x_param_values);
4691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4692 rs6000_cost->cache_line_size,
4693 global_options.x_param_values,
4694 global_options_set.x_param_values);
4695 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4696 global_options.x_param_values,
4697 global_options_set.x_param_values);
4699 /* Increase loop peeling limits based on performance analysis. */
4700 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4701 global_options.x_param_values,
4702 global_options_set.x_param_values);
4703 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4704 global_options.x_param_values,
4705 global_options_set.x_param_values);
4707 /* Use the 'model' -fsched-pressure algorithm by default. */
4708 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4709 SCHED_PRESSURE_MODEL,
4710 global_options.x_param_values,
4711 global_options_set.x_param_values);
4713 /* If using typedef char *va_list, signal that
4714 __builtin_va_start (&ap, 0) can be optimized to
4715 ap = __builtin_next_arg (0). */
4716 if (DEFAULT_ABI != ABI_V4)
4717 targetm.expand_builtin_va_start = NULL;
4720 /* If not explicitly specified via option, decide whether to generate indexed
4721 load/store instructions. A value of -1 indicates that the
4722 initial value of this variable has not been overwritten. During
4723 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4724 if (TARGET_AVOID_XFORM == -1)
4725 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4726 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4727 need indexed accesses and the type used is the scalar type of the element
4728 being loaded or stored. */
4729 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4730 && !TARGET_ALTIVEC);
4732 /* Set the -mrecip options. */
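/* For example (illustrative, assuming the usual recip_options entries):
   -mrecip=rsqrtf,!divd enables the SFmode/V4SFmode reciprocal square root
   estimates while disabling the DFmode/V2DFmode divide estimates; a
   leading '!' inverts the named mask, as handled below.  */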
4733 if (rs6000_recip_name)
4735 char *p = ASTRDUP (rs6000_recip_name);
4736 char *q;
4737 unsigned int mask, i;
4738 bool invert;
4740 while ((q = strtok (p, ",")) != NULL)
4742 p = NULL;
4743 if (*q == '!')
4745 invert = true;
4746 q++;
4748 else
4749 invert = false;
4751 if (!strcmp (q, "default"))
4752 mask = ((TARGET_RECIP_PRECISION)
4753 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4754 else
4756 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4757 if (!strcmp (q, recip_options[i].string))
4759 mask = recip_options[i].mask;
4760 break;
4763 if (i == ARRAY_SIZE (recip_options))
4765 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4766 invert = false;
4767 mask = 0;
4768 ret = false;
4772 if (invert)
4773 rs6000_recip_control &= ~mask;
4774 else
4775 rs6000_recip_control |= mask;
4779 /* Set the builtin mask reflecting the target options in use, which
4780 determines which builtins are enabled. In the past we used target_flags,
4781 but we've run out of bits, and some options are no longer in target_flags. */
4782 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4783 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4784 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4785 rs6000_builtin_mask);
4787 /* Initialize all of the registers. */
4788 rs6000_init_hard_regno_mode_ok (global_init_p);
4790 /* Save the initial options in case the user uses function-specific options. */
4791 if (global_init_p)
4792 target_option_default_node = target_option_current_node
4793 = build_target_option_node (&global_options);
4795 /* If not explicitly specified via option, decide whether to generate the
4796 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4797 if (TARGET_LINK_STACK == -1)
4798 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4800 /* Deprecate use of -mno-speculate-indirect-jumps. */
4801 if (!rs6000_speculate_indirect_jumps)
4802 warning (0, "%qs is deprecated and not recommended in any circumstances",
4803 "-mno-speculate-indirect-jumps");
4805 return ret;
4808 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4809 define the target cpu type. */
4811 static void
4812 rs6000_option_override (void)
4814 (void) rs6000_option_override_internal (true);
4818 /* Implement targetm.vectorize.builtin_mask_for_load. */
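/* On VMX-only machines, a misaligned load is realigned by loading the two
   containing aligned vectors and merging them with vperm, using a permute
   control vector that lvsl computes from the address; this hook supplies
   that mask to the vectorizer.  */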
4819 static tree
4820 rs6000_builtin_mask_for_load (void)
4822 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4823 if ((TARGET_ALTIVEC && !TARGET_VSX)
4824 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4825 return altivec_builtin_mask_for_load;
4826 else
4827 return 0;
4830 /* Implement LOOP_ALIGN. */
4831 align_flags
4832 rs6000_loop_align (rtx label)
4834 basic_block bb;
4835 int ninsns;
4837 /* Don't override loop alignment if -falign-loops was specified. */
4838 if (!can_override_loop_align)
4839 return align_loops;
4841 bb = BLOCK_FOR_INSN (label);
4842 ninsns = num_loop_insns (bb->loop_father);
4844 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
4845 if (ninsns > 4 && ninsns <= 8
4846 && (rs6000_tune == PROCESSOR_POWER4
4847 || rs6000_tune == PROCESSOR_POWER5
4848 || rs6000_tune == PROCESSOR_POWER6
4849 || rs6000_tune == PROCESSOR_POWER7
4850 || rs6000_tune == PROCESSOR_POWER8))
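/* align_flags (5) requests 2**5 == 32-byte alignment.  */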
4851 return align_flags (5);
4852 else
4853 return align_loops;
4856 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4857 after applying N iterations. This routine does not determine
4858 how many iterations are required to reach the desired alignment. */
4860 static bool
4861 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4863 if (is_packed)
4864 return false;
4866 if (TARGET_32BIT)
4868 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4869 return true;
4871 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4872 return true;
4874 return false;
4876 else
4878 if (TARGET_MACHO)
4879 return false;
4881 /* Assume that all other types are naturally aligned. CHECKME! */
4882 return true;
4886 /* Return true if the vector misalignment factor is supported by the
4887 target. */
4888 static bool
4889 rs6000_builtin_support_vector_misalignment (machine_mode mode,
4890 const_tree type,
4891 int misalignment,
4892 bool is_packed)
4894 if (TARGET_VSX)
4896 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4897 return true;
4899 /* Return false if the movmisalign pattern is not supported for this mode. */
4900 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4901 return false;
4903 if (misalignment == -1)
4905 /* Misalignment factor is unknown at compile time but we know
4906 it's word aligned. */
4907 if (rs6000_vector_alignment_reachable (type, is_packed))
4909 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4911 if (element_size == 64 || element_size == 32)
4912 return true;
4915 return false;
4918 /* VSX supports word-aligned vectors. */
4919 if (misalignment % 4 == 0)
4920 return true;
4922 return false;
4925 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4926 static int
4927 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4928 tree vectype, int misalign)
4930 unsigned elements;
4931 tree elem_type;
4933 switch (type_of_cost)
4935 case scalar_stmt:
4936 case scalar_load:
4937 case scalar_store:
4938 case vector_stmt:
4939 case vector_load:
4940 case vector_store:
4941 case vec_to_scalar:
4942 case scalar_to_vec:
4943 case cond_branch_not_taken:
4944 return 1;
4946 case vec_perm:
4947 if (TARGET_VSX)
4948 return 3;
4949 else
4950 return 1;
4952 case vec_promote_demote:
4953 if (TARGET_VSX)
4954 return 4;
4955 else
4956 return 1;
4958 case cond_branch_taken:
4959 return 3;
4961 case unaligned_load:
4962 case vector_gather_load:
4963 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4964 return 1;
4966 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4968 elements = TYPE_VECTOR_SUBPARTS (vectype);
4969 if (elements == 2)
4970 /* Double word aligned. */
4971 return 2;
4973 if (elements == 4)
4975 switch (misalign)
4977 case 8:
4978 /* Double word aligned. */
4979 return 2;
4981 case -1:
4982 /* Unknown misalignment. */
4983 case 4:
4984 case 12:
4985 /* Word aligned. */
4986 return 22;
4988 default:
4989 gcc_unreachable ();
4994 if (TARGET_ALTIVEC)
4995 /* Misaligned loads are not supported. */
4996 gcc_unreachable ();
4998 return 2;
5000 case unaligned_store:
5001 case vector_scatter_store:
5002 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5003 return 1;
5005 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5007 elements = TYPE_VECTOR_SUBPARTS (vectype);
5008 if (elements == 2)
5009 /* Double word aligned. */
5010 return 2;
5012 if (elements == 4)
5014 switch (misalign)
5016 case 8:
5017 /* Double word aligned. */
5018 return 2;
5020 case -1:
5021 /* Unknown misalignment. */
5022 case 4:
5023 case 12:
5024 /* Word aligned. */
5025 return 23;
5027 default:
5028 gcc_unreachable ();
5033 if (TARGET_ALTIVEC)
5034 /* Misaligned stores are not supported. */
5035 gcc_unreachable ();
5037 return 2;
5039 case vec_construct:
5040 /* This is a rough approximation assuming non-constant elements
5041 constructed into a vector via element insertion. FIXME:
5042 vec_construct is not granular enough for uniformly good
5043 decisions. If the initialization is a splat, this is
5044 cheaper than we estimate. Improve this someday. */
5045 elem_type = TREE_TYPE (vectype);
5046 /* 32-bit vectors loaded into registers are stored as double
5047 precision, so we need 2 permutes, 2 converts, and 1 merge
5048 to construct a vector of short floats from them. */
5049 if (SCALAR_FLOAT_TYPE_P (elem_type)
5050 && TYPE_PRECISION (elem_type) == 32)
5051 return 5;
5052 /* On POWER9, integer vector types are built up in GPRs and then
5053 use a direct move (2 cycles). For POWER8 this is even worse,
5054 as we need two direct moves and a merge, and the direct moves
5055 are five cycles. */
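/* Illustrative arithmetic: building a V4SI vector costs 4 - 1 + 2 = 5 on
   POWER9 and 4 - 1 + 5 = 8 otherwise, per the formulas below.  */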
5056 else if (INTEGRAL_TYPE_P (elem_type))
5058 if (TARGET_P9_VECTOR)
5059 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5060 else
5061 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5063 else
5064 /* V2DFmode doesn't need a direct move. */
5065 return 2;
5067 default:
5068 gcc_unreachable ();
5072 /* Implement targetm.vectorize.preferred_simd_mode. */
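/* For example (illustrative): with VSX enabled, DFmode prefers V2DFmode
   (two doubles per 128-bit register); with only AltiVec, DFmode falls
   through to word_mode and is not auto-vectorized.  */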
5074 static machine_mode
5075 rs6000_preferred_simd_mode (scalar_mode mode)
5077 if (TARGET_VSX)
5078 switch (mode)
5080 case E_DFmode:
5081 return V2DFmode;
5082 default:;
5084 if (TARGET_ALTIVEC || TARGET_VSX)
5085 switch (mode)
5087 case E_SFmode:
5088 return V4SFmode;
5089 case E_TImode:
5090 return V1TImode;
5091 case E_DImode:
5092 return V2DImode;
5093 case E_SImode:
5094 return V4SImode;
5095 case E_HImode:
5096 return V8HImode;
5097 case E_QImode:
5098 return V16QImode;
5099 default:;
5101 return word_mode;
5104 typedef struct _rs6000_cost_data
5106 struct loop *loop_info;
5107 unsigned cost[3];
5108 } rs6000_cost_data;
5110 /* Test for likely overcommitment of vector hardware resources. If a
5111 loop iteration is relatively large, and too large a percentage of
5112 instructions in the loop are vectorized, the cost model may not
5113 adequately reflect delays from unavailable vector resources.
5114 Penalize the loop body cost for this case. */
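/* Worked example (illustrative): vec_cost = 90 and not_vec_cost = 10 give
   density_pct = 90 and a total size of 100, exceeding both thresholds, so
   the body cost is scaled to 90 * 110 / 100 = 99.  */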
5116 static void
5117 rs6000_density_test (rs6000_cost_data *data)
5119 const int DENSITY_PCT_THRESHOLD = 85;
5120 const int DENSITY_SIZE_THRESHOLD = 70;
5121 const int DENSITY_PENALTY = 10;
5122 struct loop *loop = data->loop_info;
5123 basic_block *bbs = get_loop_body (loop);
5124 int nbbs = loop->num_nodes;
5125 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5126 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5127 int i, density_pct;
5129 for (i = 0; i < nbbs; i++)
5131 basic_block bb = bbs[i];
5132 gimple_stmt_iterator gsi;
5134 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5136 gimple *stmt = gsi_stmt (gsi);
5137 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5139 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5140 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5141 not_vec_cost++;
5145 free (bbs);
5146 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5148 if (density_pct > DENSITY_PCT_THRESHOLD
5149 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5151 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5152 if (dump_enabled_p ())
5153 dump_printf_loc (MSG_NOTE, vect_location,
5154 "density %d%%, cost %d exceeds threshold, penalizing "
5155 "loop body cost by %d%%", density_pct,
5156 vec_cost + not_vec_cost, DENSITY_PENALTY);
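/* An illustrative, self-contained sketch of the computation above; the
   thresholds mirror DENSITY_PCT_THRESHOLD, DENSITY_SIZE_THRESHOLD and
   DENSITY_PENALTY, and the statement counts are hypothetical.  */

static int
density_penalized_cost_sketch (int vec_cost, int not_vec_cost)
{
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    return vec_cost * (100 + 10) / 100;
  return vec_cost;
}

/* E.g., 90 vectorized vs. 8 other statements: density 91%, size 98, so
   the body cost 90 is penalized to 99.  */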
5160 /* Implement targetm.vectorize.init_cost. */
5162 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5163 instruction is needed by the vectorization. */
5164 static bool rs6000_vect_nonmem;
5166 static void *
5167 rs6000_init_cost (struct loop *loop_info)
5169 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5170 data->loop_info = loop_info;
5171 data->cost[vect_prologue] = 0;
5172 data->cost[vect_body] = 0;
5173 data->cost[vect_epilogue] = 0;
5174 rs6000_vect_nonmem = false;
5175 return data;
5178 /* Implement targetm.vectorize.add_stmt_cost. */
5180 static unsigned
5181 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5182 struct _stmt_vec_info *stmt_info, int misalign,
5183 enum vect_cost_model_location where)
5185 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5186 unsigned retval = 0;
5188 if (flag_vect_cost_model)
5190 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5191 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5192 misalign);
5193 /* Statements in an inner loop relative to the loop being
5194 vectorized are weighted more heavily. The value here is
5195 arbitrary and could potentially be improved with analysis. */
5196 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5197 count *= 50; /* FIXME. */
5199 retval = (unsigned) (count * stmt_cost);
5200 cost_data->cost[where] += retval;
5202 /* Check whether we're doing something other than just a copy loop.
5203 Not all such loops may be profitably vectorized; see
5204 rs6000_finish_cost. */
5205 if ((kind == vec_to_scalar || kind == vec_perm
5206 || kind == vec_promote_demote || kind == vec_construct
5207 || kind == scalar_to_vec)
5208 || (where == vect_body && kind == vector_stmt))
5209 rs6000_vect_nonmem = true;
5212 return retval;
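/* An illustrative, self-contained sketch of the accumulation above for one
   statement; STMT_COST stands in for the (hypothetical) value computed by
   rs6000_builtin_vectorization_cost.  */

static unsigned
add_stmt_cost_sketch (unsigned *where_cost, int count, int stmt_cost,
                      int in_inner_loop)
{
  if (in_inner_loop)
    count *= 50;               /* Same arbitrary inner-loop weight.  */
  unsigned retval = (unsigned) (count * stmt_cost);
  *where_cost += retval;
  return retval;
}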
5215 /* Implement targetm.vectorize.finish_cost. */
5217 static void
5218 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5219 unsigned *body_cost, unsigned *epilogue_cost)
5221 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5223 if (cost_data->loop_info)
5224 rs6000_density_test (cost_data);
5226 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5227 that require versioning for any reason. The vectorization is at
5228 best a wash inside the loop, and the versioning checks make
5229 profitability highly unlikely and potentially quite harmful. */
5230 if (cost_data->loop_info)
5232 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5233 if (!rs6000_vect_nonmem
5234 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5235 && LOOP_REQUIRES_VERSIONING (vec_info))
5236 cost_data->cost[vect_body] += 10000;
5239 *prologue_cost = cost_data->cost[vect_prologue];
5240 *body_cost = cost_data->cost[vect_body];
5241 *epilogue_cost = cost_data->cost[vect_epilogue];
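/* E.g., a minimum-VF (factor 2) loop that only copies memory leaves
   rs6000_vect_nonmem false; if it also needs versioning, the 10000 added
   above effectively vetoes vectorizing it.  */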
5244 /* Implement targetm.vectorize.destroy_cost_data. */
5246 static void
5247 rs6000_destroy_cost_data (void *data)
5249 free (data);
5252 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5253 library with vectorized intrinsics. */
5255 static tree
5256 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5257 tree type_in)
5259 char name[32];
5260 const char *suffix = NULL;
5261 tree fntype, new_fndecl, bdecl = NULL_TREE;
5262 int n_args = 1;
5263 const char *bname;
5264 machine_mode el_mode, in_mode;
5265 int n, in_n;
5267 /* Libmass is suitable only for unsafe math, as it does not correctly
5268 support parts of IEEE (such as denormals) with the required precision.
5269 Only support it if we have VSX, to use the simd d2 or f4 functions.
5270 XXX: Add variable length support. */
5271 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5272 return NULL_TREE;
5274 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5275 n = TYPE_VECTOR_SUBPARTS (type_out);
5276 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5277 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5278 if (el_mode != in_mode
5279 || n != in_n)
5280 return NULL_TREE;
5282 switch (fn)
5284 CASE_CFN_ATAN2:
5285 CASE_CFN_HYPOT:
5286 CASE_CFN_POW:
5287 n_args = 2;
5288 gcc_fallthrough ();
5290 CASE_CFN_ACOS:
5291 CASE_CFN_ACOSH:
5292 CASE_CFN_ASIN:
5293 CASE_CFN_ASINH:
5294 CASE_CFN_ATAN:
5295 CASE_CFN_ATANH:
5296 CASE_CFN_CBRT:
5297 CASE_CFN_COS:
5298 CASE_CFN_COSH:
5299 CASE_CFN_ERF:
5300 CASE_CFN_ERFC:
5301 CASE_CFN_EXP2:
5302 CASE_CFN_EXP:
5303 CASE_CFN_EXPM1:
5304 CASE_CFN_LGAMMA:
5305 CASE_CFN_LOG10:
5306 CASE_CFN_LOG1P:
5307 CASE_CFN_LOG2:
5308 CASE_CFN_LOG:
5309 CASE_CFN_SIN:
5310 CASE_CFN_SINH:
5311 CASE_CFN_SQRT:
5312 CASE_CFN_TAN:
5313 CASE_CFN_TANH:
5314 if (el_mode == DFmode && n == 2)
5316 bdecl = mathfn_built_in (double_type_node, fn);
5317 suffix = "d2"; /* pow -> powd2 */
5319 else if (el_mode == SFmode && n == 4)
5321 bdecl = mathfn_built_in (float_type_node, fn);
5322 suffix = "4"; /* powf -> powf4 */
5324 else
5325 return NULL_TREE;
5326 if (!bdecl)
5327 return NULL_TREE;
5328 break;
5330 default:
5331 return NULL_TREE;
5334 gcc_assert (suffix != NULL);
5335 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5336 if (!bname)
5337 return NULL_TREE;
5339 strcpy (name, bname + sizeof ("__builtin_") - 1);
5340 strcat (name, suffix);
5342 if (n_args == 1)
5343 fntype = build_function_type_list (type_out, type_in, NULL);
5344 else if (n_args == 2)
5345 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5346 else
5347 gcc_unreachable ();
5349 /* Build a function declaration for the vectorized function. */
5350 new_fndecl = build_decl (BUILTINS_LOCATION,
5351 FUNCTION_DECL, get_identifier (name), fntype);
5352 TREE_PUBLIC (new_fndecl) = 1;
5353 DECL_EXTERNAL (new_fndecl) = 1;
5354 DECL_IS_NOVOPS (new_fndecl) = 1;
5355 TREE_READONLY (new_fndecl) = 1;
5357 return new_fndecl;
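/* Worked example: for a V2DFmode pow, BDECL is __builtin_pow, the
   "__builtin_" prefix is stripped and the "d2" suffix appended, so the
   decl built above is an external, read-only "powd2" taking and returning
   V2DF.  */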
5360 /* Returns a function decl for a vectorized version of the builtin function
5361 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5362 if it is not available. */
5364 static tree
5365 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5366 tree type_in)
5368 machine_mode in_mode, out_mode;
5369 int in_n, out_n;
5371 if (TARGET_DEBUG_BUILTIN)
5372 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5373 combined_fn_name (combined_fn (fn)),
5374 GET_MODE_NAME (TYPE_MODE (type_out)),
5375 GET_MODE_NAME (TYPE_MODE (type_in)));
5377 if (TREE_CODE (type_out) != VECTOR_TYPE
5378 || TREE_CODE (type_in) != VECTOR_TYPE)
5379 return NULL_TREE;
5381 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5382 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5383 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5384 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5386 switch (fn)
5388 CASE_CFN_COPYSIGN:
5389 if (VECTOR_UNIT_VSX_P (V2DFmode)
5390 && out_mode == DFmode && out_n == 2
5391 && in_mode == DFmode && in_n == 2)
5392 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5393 if (VECTOR_UNIT_VSX_P (V4SFmode)
5394 && out_mode == SFmode && out_n == 4
5395 && in_mode == SFmode && in_n == 4)
5396 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5397 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5398 && out_mode == SFmode && out_n == 4
5399 && in_mode == SFmode && in_n == 4)
5400 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5401 break;
5402 CASE_CFN_CEIL:
5403 if (VECTOR_UNIT_VSX_P (V2DFmode)
5404 && out_mode == DFmode && out_n == 2
5405 && in_mode == DFmode && in_n == 2)
5406 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5407 if (VECTOR_UNIT_VSX_P (V4SFmode)
5408 && out_mode == SFmode && out_n == 4
5409 && in_mode == SFmode && in_n == 4)
5410 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5411 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5412 && out_mode == SFmode && out_n == 4
5413 && in_mode == SFmode && in_n == 4)
5414 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5415 break;
5416 CASE_CFN_FLOOR:
5417 if (VECTOR_UNIT_VSX_P (V2DFmode)
5418 && out_mode == DFmode && out_n == 2
5419 && in_mode == DFmode && in_n == 2)
5420 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5421 if (VECTOR_UNIT_VSX_P (V4SFmode)
5422 && out_mode == SFmode && out_n == 4
5423 && in_mode == SFmode && in_n == 4)
5424 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5425 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5426 && out_mode == SFmode && out_n == 4
5427 && in_mode == SFmode && in_n == 4)
5428 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5429 break;
5430 CASE_CFN_FMA:
5431 if (VECTOR_UNIT_VSX_P (V2DFmode)
5432 && out_mode == DFmode && out_n == 2
5433 && in_mode == DFmode && in_n == 2)
5434 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5435 if (VECTOR_UNIT_VSX_P (V4SFmode)
5436 && out_mode == SFmode && out_n == 4
5437 && in_mode == SFmode && in_n == 4)
5438 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5439 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5440 && out_mode == SFmode && out_n == 4
5441 && in_mode == SFmode && in_n == 4)
5442 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5443 break;
5444 CASE_CFN_TRUNC:
5445 if (VECTOR_UNIT_VSX_P (V2DFmode)
5446 && out_mode == DFmode && out_n == 2
5447 && in_mode == DFmode && in_n == 2)
5448 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5449 if (VECTOR_UNIT_VSX_P (V4SFmode)
5450 && out_mode == SFmode && out_n == 4
5451 && in_mode == SFmode && in_n == 4)
5452 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5453 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5454 && out_mode == SFmode && out_n == 4
5455 && in_mode == SFmode && in_n == 4)
5456 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5457 break;
5458 CASE_CFN_NEARBYINT:
5459 if (VECTOR_UNIT_VSX_P (V2DFmode)
5460 && flag_unsafe_math_optimizations
5461 && out_mode == DFmode && out_n == 2
5462 && in_mode == DFmode && in_n == 2)
5463 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5464 if (VECTOR_UNIT_VSX_P (V4SFmode)
5465 && flag_unsafe_math_optimizations
5466 && out_mode == SFmode && out_n == 4
5467 && in_mode == SFmode && in_n == 4)
5468 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5469 break;
5470 CASE_CFN_RINT:
5471 if (VECTOR_UNIT_VSX_P (V2DFmode)
5472 && !flag_trapping_math
5473 && out_mode == DFmode && out_n == 2
5474 && in_mode == DFmode && in_n == 2)
5475 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5476 if (VECTOR_UNIT_VSX_P (V4SFmode)
5477 && !flag_trapping_math
5478 && out_mode == SFmode && out_n == 4
5479 && in_mode == SFmode && in_n == 4)
5480 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5481 break;
5482 default:
5483 break;
5486 /* Generate calls to libmass if appropriate. */
5487 if (rs6000_veclib_handler)
5488 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5490 return NULL_TREE;
5493 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5495 static tree
5496 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5497 tree type_in)
5499 machine_mode in_mode, out_mode;
5500 int in_n, out_n;
5502 if (TARGET_DEBUG_BUILTIN)
5503 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5504 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5505 GET_MODE_NAME (TYPE_MODE (type_out)),
5506 GET_MODE_NAME (TYPE_MODE (type_in)));
5508 if (TREE_CODE (type_out) != VECTOR_TYPE
5509 || TREE_CODE (type_in) != VECTOR_TYPE)
5510 return NULL_TREE;
5512 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5513 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5514 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5515 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5517 enum rs6000_builtins fn
5518 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5519 switch (fn)
5521 case RS6000_BUILTIN_RSQRTF:
5522 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5523 && out_mode == SFmode && out_n == 4
5524 && in_mode == SFmode && in_n == 4)
5525 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5526 break;
5527 case RS6000_BUILTIN_RSQRT:
5528 if (VECTOR_UNIT_VSX_P (V2DFmode)
5529 && out_mode == DFmode && out_n == 2
5530 && in_mode == DFmode && in_n == 2)
5531 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5532 break;
5533 case RS6000_BUILTIN_RECIPF:
5534 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5535 && out_mode == SFmode && out_n == 4
5536 && in_mode == SFmode && in_n == 4)
5537 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5538 break;
5539 case RS6000_BUILTIN_RECIP:
5540 if (VECTOR_UNIT_VSX_P (V2DFmode)
5541 && out_mode == DFmode && out_n == 2
5542 && in_mode == DFmode && in_n == 2)
5543 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5544 break;
5545 default:
5546 break;
5548 return NULL_TREE;
5551 /* Default CPU string for rs6000*_file_start functions. */
5552 static const char *rs6000_default_cpu;
5554 #ifdef USING_ELFOS_H
5555 const char *rs6000_machine;
5557 const char *
5558 rs6000_machine_from_flags (void)
5560 HOST_WIDE_INT flags = rs6000_isa_flags;
5562 /* Disable the flags that should never influence the .machine selection. */
5563 flags &= ~(OPTION_MASK_PPC_GFXOPT | OPTION_MASK_PPC_GPOPT);
5565 if ((flags & (ISA_FUTURE_MASKS_SERVER & ~ISA_3_0_MASKS_SERVER)) != 0)
5566 return "future";
5567 if ((flags & (ISA_3_0_MASKS_SERVER & ~ISA_2_7_MASKS_SERVER)) != 0)
5568 return "power9";
5569 if ((flags & (ISA_2_7_MASKS_SERVER & ~ISA_2_6_MASKS_SERVER)) != 0)
5570 return "power8";
5571 if ((flags & (ISA_2_6_MASKS_SERVER & ~ISA_2_5_MASKS_SERVER)) != 0)
5572 return "power7";
5573 if ((flags & (ISA_2_5_MASKS_SERVER & ~ISA_2_4_MASKS)) != 0)
5574 return "power6";
5575 if ((flags & (ISA_2_4_MASKS & ~ISA_2_1_MASKS)) != 0)
5576 return "power5";
5577 if ((flags & ISA_2_1_MASKS) != 0)
5578 return "power4";
5579 if ((flags & OPTION_MASK_POWERPC64) != 0)
5580 return "ppc64";
5581 return "ppc";
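/* An illustrative, self-contained sketch of the selection idiom above,
   with hypothetical masks: test whether any flag is set that is new in the
   newer ISA mask, i.e. present there but absent from the older one.  */

static const char *
machine_from_flags_sketch (unsigned long flags, unsigned long isa_new,
                           unsigned long isa_old, const char *newer,
                           const char *older)
{
  return (flags & (isa_new & ~isa_old)) != 0 ? newer : older;
}

/* E.g., -mcpu=power9 sets flags that are in ISA_3_0_MASKS_SERVER but not
   in ISA_2_7_MASKS_SERVER, so the cascade above stops at "power9".  */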
5584 void
5585 emit_asm_machine (void)
5587 fprintf (asm_out_file, "\t.machine %s\n", rs6000_machine);
5589 #endif
5591 /* Do anything needed at the start of the asm file. */
5593 static void
5594 rs6000_file_start (void)
5596 char buffer[80];
5597 const char *start = buffer;
5598 FILE *file = asm_out_file;
5600 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5602 default_file_start ();
5604 if (flag_verbose_asm)
5606 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5608 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5610 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5611 start = "";
5614 if (global_options_set.x_rs6000_cpu_index)
5616 fprintf (file, "%s -mcpu=%s", start,
5617 processor_target_table[rs6000_cpu_index].name);
5618 start = "";
5621 if (global_options_set.x_rs6000_tune_index)
5623 fprintf (file, "%s -mtune=%s", start,
5624 processor_target_table[rs6000_tune_index].name);
5625 start = "";
5628 if (PPC405_ERRATUM77)
5630 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5631 start = "";
5634 #ifdef USING_ELFOS_H
5635 switch (rs6000_sdata)
5637 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5638 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5639 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5640 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5643 if (rs6000_sdata && g_switch_value)
5645 fprintf (file, "%s -G %d", start,
5646 g_switch_value);
5647 start = "";
5649 #endif
5651 if (*start == '\0')
5652 putc ('\n', file);
5655 #ifdef USING_ELFOS_H
5656 rs6000_machine = rs6000_machine_from_flags ();
5657 emit_asm_machine ();
5658 #endif
5660 if (DEFAULT_ABI == ABI_ELFv2)
5661 fprintf (file, "\t.abiversion 2\n");
5665 /* Return nonzero if this function is known to have a null epilogue. */
5667 int
5668 direct_return (void)
5670 if (reload_completed)
5672 rs6000_stack_t *info = rs6000_stack_info ();
5674 if (info->first_gp_reg_save == 32
5675 && info->first_fp_reg_save == 64
5676 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5677 && ! info->lr_save_p
5678 && ! info->cr_save_p
5679 && info->vrsave_size == 0
5680 && ! info->push_p)
5681 return 1;
5684 return 0;
5687 /* Helper for num_insns_constant. Calculate number of instructions to
5688 load VALUE to a single gpr using combinations of addi, addis, ori,
5689 oris and sldi instructions. */
5691 static int
5692 num_insns_constant_gpr (HOST_WIDE_INT value)
5694 /* signed constant loadable with addi */
5695 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5696 return 1;
5698 /* constant loadable with addis */
5699 else if ((value & 0xffff) == 0
5700 && (value >> 31 == -1 || value >> 31 == 0))
5701 return 1;
5703 else if (TARGET_POWERPC64)
5705 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5706 HOST_WIDE_INT high = value >> 31;
5708 if (high == 0 || high == -1)
5709 return 2;
5711 high >>= 1;
5713 if (low == 0)
5714 return num_insns_constant_gpr (high) + 1;
5715 else if (high == 0)
5716 return num_insns_constant_gpr (low) + 1;
5717 else
5718 return (num_insns_constant_gpr (high)
5719 + num_insns_constant_gpr (low) + 1);
5722 else
5723 return 2;
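/* Worked examples (illustrative):
     0x7fff              -> 1 insn  (addi: fits in 16 signed bits)
     0x12340000          -> 1 insn  (addis: low 16 bits clear)
     0x12345678          -> 2 insns (addis + ori)
     0x1234567890abcdef  -> 5 insns (addis + ori for the high 32 bits,
                                     sldi 32, then oris + ori).  */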
5726 /* Helper for num_insns_constant. Allow constants formed by the
5727 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5728 and handle modes that require multiple gprs. */
5730 static int
5731 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5733 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5734 int total = 0;
5735 while (nregs-- > 0)
5737 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5738 int insns = num_insns_constant_gpr (low);
5739 if (insns > 2
5740 /* We won't get more than 2 from num_insns_constant_gpr
5741 except when TARGET_POWERPC64 and mode is DImode or
5742 wider, so the register mode must be DImode. */
5743 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5744 insns = 2;
5745 total += insns;
5746 value >>= BITS_PER_WORD;
5748 return total;
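/* E.g., the 64-bit mask 0x000fffffffffffff costs 4 by the plain recipe
   above, but it is a valid AND mask (li -1; rldicl), so it is counted
   as 2.  */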
5751 /* Return the number of instructions it takes to form a constant in as
5752 many gprs as are needed for MODE. */
5754 int
5755 num_insns_constant (rtx op, machine_mode mode)
5757 HOST_WIDE_INT val;
5759 switch (GET_CODE (op))
5761 case CONST_INT:
5762 val = INTVAL (op);
5763 break;
5765 case CONST_WIDE_INT:
5767 int insns = 0;
5768 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5769 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5770 DImode);
5771 return insns;
5774 case CONST_DOUBLE:
5776 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5778 if (mode == SFmode || mode == SDmode)
5780 long l;
5782 if (mode == SDmode)
5783 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5784 else
5785 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5786 /* See the first define_split in rs6000.md handling a
5787 const_double_operand. */
5788 val = l;
5789 mode = SImode;
5791 else if (mode == DFmode || mode == DDmode)
5793 long l[2];
5795 if (mode == DDmode)
5796 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5797 else
5798 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5800 /* See the second (32-bit) and third (64-bit) define_split
5801 in rs6000.md handling a const_double_operand. */
5802 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5803 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5804 mode = DImode;
5806 else if (mode == TFmode || mode == TDmode
5807 || mode == KFmode || mode == IFmode)
5809 long l[4];
5810 int insns;
5812 if (mode == TDmode)
5813 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5814 else
5815 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5817 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5818 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5819 insns = num_insns_constant_multi (val, DImode);
5820 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5821 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5822 insns += num_insns_constant_multi (val, DImode);
5823 return insns;
5825 else
5826 gcc_unreachable ();
5828 break;
5830 default:
5831 gcc_unreachable ();
5834 return num_insns_constant_multi (val, mode);
5837 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5838 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5839 corresponding element of the vector, but for V4SFmode, the
5840 corresponding "float" is interpreted as an SImode integer. */
5842 HOST_WIDE_INT
5843 const_vector_elt_as_int (rtx op, unsigned int elt)
5845 rtx tmp;
5847 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5848 gcc_assert (GET_MODE (op) != V2DImode
5849 && GET_MODE (op) != V2DFmode);
5851 tmp = CONST_VECTOR_ELT (op, elt);
5852 if (GET_MODE (op) == V4SFmode)
5853 tmp = gen_lowpart (SImode, tmp);
5854 return INTVAL (tmp);
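/* E.g., for a V4SFmode constant the element 1.0f is reinterpreted as the
   SImode value 0x3f800000, its IEEE single-precision bit pattern.  */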
5857 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5858 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5859 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5860 all items are set to the same value and contain COPIES replicas of the
5861 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5862 operand and the others are set to the value of the operand's msb. */
5864 static bool
5865 vspltis_constant (rtx op, unsigned step, unsigned copies)
5867 machine_mode mode = GET_MODE (op);
5868 machine_mode inner = GET_MODE_INNER (mode);
5870 unsigned i;
5871 unsigned nunits;
5872 unsigned bitsize;
5873 unsigned mask;
5875 HOST_WIDE_INT val;
5876 HOST_WIDE_INT splat_val;
5877 HOST_WIDE_INT msb_val;
5879 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5880 return false;
5882 nunits = GET_MODE_NUNITS (mode);
5883 bitsize = GET_MODE_BITSIZE (inner);
5884 mask = GET_MODE_MASK (inner);
5886 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5887 splat_val = val;
5888 msb_val = val >= 0 ? 0 : -1;
5890 /* Construct the value to be splatted, if possible. If not, return 0. */
5891 for (i = 2; i <= copies; i *= 2)
5893 HOST_WIDE_INT small_val;
5894 bitsize /= 2;
5895 small_val = splat_val >> bitsize;
5896 mask >>= bitsize;
5897 if (splat_val != ((HOST_WIDE_INT)
5898 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5899 | (small_val & mask)))
5900 return false;
5901 splat_val = small_val;
5904 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5905 if (EASY_VECTOR_15 (splat_val))
5908 /* Also check if we can splat, and then add the result to itself. Do so if
5909 the value is positive, or if the splat instruction is using OP's mode;
5910 for splat_val < 0, the splat and the add should use the same mode. */
5911 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5912 && (splat_val >= 0 || (step == 1 && copies == 1)))
5915 /* Also check if we are loading up the most significant bit, which can be done by
5916 loading up -1 and shifting the value left by -1. */
5917 else if (EASY_VECTOR_MSB (splat_val, inner))
5920 else
5921 return false;
5923 /* Check if VAL is present in every STEP-th element, and the
5924 other elements are filled with its most significant bit. */
5925 for (i = 1; i < nunits; ++i)
5927 HOST_WIDE_INT desired_val;
5928 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5929 if ((i & (step - 1)) == 0)
5930 desired_val = val;
5931 else
5932 desired_val = msb_val;
5934 if (desired_val != const_vector_elt_as_int (op, elt))
5935 return false;
5938 return true;
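/* Worked example (illustrative): a V8HImode constant with 0x1212 in every
   element is accepted with step = 1, copies = 2: halving the 16-bit
   element gives the byte 0x12 replicated twice, and 0x12 fits vspltisb.
   A splat of 5 in every element is accepted directly for vspltish
   (step = 1, copies = 1).  */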
5941 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
5942 instruction, filling in the bottom elements with 0 or -1.
5944 Return 0 if the constant cannot be generated with VSLDOI. Return positive
5945 for the number of zeroes to shift in, or negative for the number of 0xff
5946 bytes to shift in.
5948 OP is a CONST_VECTOR. */
5950 int
5951 vspltis_shifted (rtx op)
5953 machine_mode mode = GET_MODE (op);
5954 machine_mode inner = GET_MODE_INNER (mode);
5956 unsigned i, j;
5957 unsigned nunits;
5958 unsigned mask;
5960 HOST_WIDE_INT val;
5962 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
5963 return false;
5965 /* We need to create pseudo registers to do the shift, so don't recognize
5966 shift vector constants after reload. */
5967 if (!can_create_pseudo_p ())
5968 return false;
5970 nunits = GET_MODE_NUNITS (mode);
5971 mask = GET_MODE_MASK (inner);
5973 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
5975 /* Check if the value can really be the operand of a vspltis[bhw]. */
5976 if (EASY_VECTOR_15 (val))
5979 /* Also check if we are loading up the most significant bit which can be done
5980 by loading up -1 and shifting the value left by -1. */
5981 else if (EASY_VECTOR_MSB (val, inner))
5984 else
5985 return 0;
5987 /* Check if VAL is present in every STEP-th element until we find elements
5988 that are 0 or all 1 bits. */
5989 for (i = 1; i < nunits; ++i)
5991 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
5992 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
5994 /* If the value isn't the splat value, check for the remaining elements
5995 being 0/-1. */
5996 if (val != elt_val)
5998 if (elt_val == 0)
6000 for (j = i+1; j < nunits; ++j)
6002 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6003 if (const_vector_elt_as_int (op, elt2) != 0)
6004 return 0;
6007 return (nunits - i) * GET_MODE_SIZE (inner);
6010 else if ((elt_val & mask) == mask)
6012 for (j = i+1; j < nunits; ++j)
6014 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6015 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6016 return 0;
6019 return -((nunits - i) * GET_MODE_SIZE (inner));
6022 else
6023 return 0;
6027 /* If all elements are equal, we don't need to do VSLDOI. */
6028 return 0;
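/* Worked examples (big endian, illustrative): the V4SImode constant
   { 5, 0, 0, 0 } returns 12 (vspltisw 5, then VSLDOI shifts in twelve
   zero bytes), while { 5, -1, -1, -1 } returns -12 (twelve 0xff bytes
   shifted in).  */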
6032 /* Return true if OP is of the given MODE and can be synthesized
6033 with a vspltisb, vspltish or vspltisw. */
6035 bool
6036 easy_altivec_constant (rtx op, machine_mode mode)
6038 unsigned step, copies;
6040 if (mode == VOIDmode)
6041 mode = GET_MODE (op);
6042 else if (mode != GET_MODE (op))
6043 return false;
6045 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6046 constants. */
6047 if (mode == V2DFmode)
6048 return zero_constant (op, mode);
6050 else if (mode == V2DImode)
6052 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6053 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6054 return false;
6056 if (zero_constant (op, mode))
6057 return true;
6059 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6060 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6061 return true;
6063 return false;
6066 /* V1TImode is a special container for TImode. Ignore for now. */
6067 else if (mode == V1TImode)
6068 return false;
6070 /* Start with a vspltisw. */
6071 step = GET_MODE_NUNITS (mode) / 4;
6072 copies = 1;
6074 if (vspltis_constant (op, step, copies))
6075 return true;
6077 /* Then try with a vspltish. */
6078 if (step == 1)
6079 copies <<= 1;
6080 else
6081 step >>= 1;
6083 if (vspltis_constant (op, step, copies))
6084 return true;
6086 /* And finally a vspltisb. */
6087 if (step == 1)
6088 copies <<= 1;
6089 else
6090 step >>= 1;
6092 if (vspltis_constant (op, step, copies))
6093 return true;
6095 if (vspltis_shifted (op) != 0)
6096 return true;
6098 return false;
6101 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6102 result is OP. Abort if it is not possible. */
6104 rtx
6105 gen_easy_altivec_constant (rtx op)
6107 machine_mode mode = GET_MODE (op);
6108 int nunits = GET_MODE_NUNITS (mode);
6109 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6110 unsigned step = nunits / 4;
6111 unsigned copies = 1;
6113 /* Start with a vspltisw. */
6114 if (vspltis_constant (op, step, copies))
6115 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6117 /* Then try with a vspltish. */
6118 if (step == 1)
6119 copies <<= 1;
6120 else
6121 step >>= 1;
6123 if (vspltis_constant (op, step, copies))
6124 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6126 /* And finally a vspltisb. */
6127 if (step == 1)
6128 copies <<= 1;
6129 else
6130 step >>= 1;
6132 if (vspltis_constant (op, step, copies))
6133 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6135 gcc_unreachable ();
6138 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6139 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6141 Return the number of instructions needed (1 or 2) through the address
6142 pointed to by NUM_INSNS_PTR.
6144 Return the constant that is being split via CONSTANT_PTR. */
6146 bool
6147 xxspltib_constant_p (rtx op,
6148 machine_mode mode,
6149 int *num_insns_ptr,
6150 int *constant_ptr)
6152 size_t nunits = GET_MODE_NUNITS (mode);
6153 size_t i;
6154 HOST_WIDE_INT value;
6155 rtx element;
6157 /* Set the returned values to out of bound values. */
6158 *num_insns_ptr = -1;
6159 *constant_ptr = 256;
6161 if (!TARGET_P9_VECTOR)
6162 return false;
6164 if (mode == VOIDmode)
6165 mode = GET_MODE (op);
6167 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6168 return false;
6170 /* Handle (vec_duplicate <constant>). */
6171 if (GET_CODE (op) == VEC_DUPLICATE)
6173 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6174 && mode != V2DImode)
6175 return false;
6177 element = XEXP (op, 0);
6178 if (!CONST_INT_P (element))
6179 return false;
6181 value = INTVAL (element);
6182 if (!IN_RANGE (value, -128, 127))
6183 return false;
6186 /* Handle (const_vector [...]). */
6187 else if (GET_CODE (op) == CONST_VECTOR)
6189 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6190 && mode != V2DImode)
6191 return false;
6193 element = CONST_VECTOR_ELT (op, 0);
6194 if (!CONST_INT_P (element))
6195 return false;
6197 value = INTVAL (element);
6198 if (!IN_RANGE (value, -128, 127))
6199 return false;
6201 for (i = 1; i < nunits; i++)
6203 element = CONST_VECTOR_ELT (op, i);
6204 if (!CONST_INT_P (element))
6205 return false;
6207 if (value != INTVAL (element))
6208 return false;
6212 /* Handle integer constants being loaded into the upper part of the VSX
6213 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6214 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6215 else if (CONST_INT_P (op))
6217 if (!SCALAR_INT_MODE_P (mode))
6218 return false;
6220 value = INTVAL (op);
6221 if (!IN_RANGE (value, -128, 127))
6222 return false;
6224 if (!IN_RANGE (value, -1, 0))
6226 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6227 return false;
6229 if (EASY_VECTOR_15 (value))
6230 return false;
6234 else
6235 return false;
6237 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6238 sign extend. Special case 0/-1 to allow getting any VSX register instead
6239 of an Altivec register. */
6240 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6241 && EASY_VECTOR_15 (value))
6242 return false;
6244 /* Return # of instructions and the constant byte for XXSPLTIB. */
6245 if (mode == V16QImode)
6246 *num_insns_ptr = 1;
6248 else if (IN_RANGE (value, -1, 0))
6249 *num_insns_ptr = 1;
6251 else
6252 *num_insns_ptr = 2;
6254 *constant_ptr = (int) value;
6255 return true;
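/* Illustrative examples: a V16QImode splat of 0x42 is 1 insn
   (xxspltib 66); a V4SImode splat of 100 is 2 insns (xxspltib plus a
   vextsb2w sign extension); a V4SImode splat of 12 is rejected, since
   vspltisw 12 is cheaper.  */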
6258 const char *
6259 output_vec_const_move (rtx *operands)
6261 int shift;
6262 machine_mode mode;
6263 rtx dest, vec;
6265 dest = operands[0];
6266 vec = operands[1];
6267 mode = GET_MODE (dest);
6269 if (TARGET_VSX)
6271 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6272 int xxspltib_value = 256;
6273 int num_insns = -1;
6275 if (zero_constant (vec, mode))
6277 if (TARGET_P9_VECTOR)
6278 return "xxspltib %x0,0";
6280 else if (dest_vmx_p)
6281 return "vspltisw %0,0";
6283 else
6284 return "xxlxor %x0,%x0,%x0";
6287 if (all_ones_constant (vec, mode))
6289 if (TARGET_P9_VECTOR)
6290 return "xxspltib %x0,255";
6292 else if (dest_vmx_p)
6293 return "vspltisw %0,-1";
6295 else if (TARGET_P8_VECTOR)
6296 return "xxlorc %x0,%x0,%x0";
6298 else
6299 gcc_unreachable ();
6302 if (TARGET_P9_VECTOR
6303 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6305 if (num_insns == 1)
6307 operands[2] = GEN_INT (xxspltib_value & 0xff);
6308 return "xxspltib %x0,%2";
6311 return "#";
6315 if (TARGET_ALTIVEC)
6317 rtx splat_vec;
6319 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6320 if (zero_constant (vec, mode))
6321 return "vspltisw %0,0";
6323 if (all_ones_constant (vec, mode))
6324 return "vspltisw %0,-1";
6326 /* Do we need to construct a value using VSLDOI? */
6327 shift = vspltis_shifted (vec);
6328 if (shift != 0)
6329 return "#";
6331 splat_vec = gen_easy_altivec_constant (vec);
6332 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6333 operands[1] = XEXP (splat_vec, 0);
6334 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6335 return "#";
6337 switch (GET_MODE (splat_vec))
6339 case E_V4SImode:
6340 return "vspltisw %0,%1";
6342 case E_V8HImode:
6343 return "vspltish %0,%1";
6345 case E_V16QImode:
6346 return "vspltisb %0,%1";
6348 default:
6349 gcc_unreachable ();
6353 gcc_unreachable ();
6356 /* Initialize vector TARGET to VALS. */
6358 void
6359 rs6000_expand_vector_init (rtx target, rtx vals)
6361 machine_mode mode = GET_MODE (target);
6362 machine_mode inner_mode = GET_MODE_INNER (mode);
6363 int n_elts = GET_MODE_NUNITS (mode);
6364 int n_var = 0, one_var = -1;
6365 bool all_same = true, all_const_zero = true;
6366 rtx x, mem;
6367 int i;
6369 for (i = 0; i < n_elts; ++i)
6371 x = XVECEXP (vals, 0, i);
6372 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6373 ++n_var, one_var = i;
6374 else if (x != CONST0_RTX (inner_mode))
6375 all_const_zero = false;
6377 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6378 all_same = false;
6381 if (n_var == 0)
6383 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6384 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6385 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6387 /* Zero register. */
6388 emit_move_insn (target, CONST0_RTX (mode));
6389 return;
6391 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6393 /* Splat immediate. */
6394 emit_insn (gen_rtx_SET (target, const_vec));
6395 return;
6397 else
6399 /* Load from constant pool. */
6400 emit_move_insn (target, const_vec);
6401 return;
6405 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6406 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6408 rtx op[2];
6409 size_t i;
6410 size_t num_elements = all_same ? 1 : 2;
6411 for (i = 0; i < num_elements; i++)
6413 op[i] = XVECEXP (vals, 0, i);
6414 /* Just in case there is a SUBREG with a smaller mode, do a
6415 conversion. */
6416 if (GET_MODE (op[i]) != inner_mode)
6418 rtx tmp = gen_reg_rtx (inner_mode);
6419 convert_move (tmp, op[i], 0);
6420 op[i] = tmp;
6422 /* Allow load with splat double word. */
6423 else if (MEM_P (op[i]))
6425 if (!all_same)
6426 op[i] = force_reg (inner_mode, op[i]);
6428 else if (!REG_P (op[i]))
6429 op[i] = force_reg (inner_mode, op[i]);
6432 if (all_same)
6434 if (mode == V2DFmode)
6435 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6436 else
6437 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6439 else
6441 if (mode == V2DFmode)
6442 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6443 else
6444 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6446 return;
6449 /* Special case initializing vector int if we are on 64-bit systems with
6450 direct move or we have the ISA 3.0 instructions. */
6451 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6452 && TARGET_DIRECT_MOVE_64BIT)
6454 if (all_same)
6456 rtx element0 = XVECEXP (vals, 0, 0);
6457 if (MEM_P (element0))
6458 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6459 else
6460 element0 = force_reg (SImode, element0);
6462 if (TARGET_P9_VECTOR)
6463 emit_insn (gen_vsx_splat_v4si (target, element0));
6464 else
6466 rtx tmp = gen_reg_rtx (DImode);
6467 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6468 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6470 return;
6472 else
6474 rtx elements[4];
6475 size_t i;
6477 for (i = 0; i < 4; i++)
6478 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6480 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6481 elements[2], elements[3]));
6482 return;
6486 /* With single precision floating point on VSX, we know that internally single
6487 precision is actually represented as a double, and either make 2 V2DF
6488 vectors, and convert these vectors to single precision, or do one
6489 conversion, and splat the result to the other elements. */
6490 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6492 if (all_same)
6494 rtx element0 = XVECEXP (vals, 0, 0);
6496 if (TARGET_P9_VECTOR)
6498 if (MEM_P (element0))
6499 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6501 emit_insn (gen_vsx_splat_v4sf (target, element0));
6504 else
6506 rtx freg = gen_reg_rtx (V4SFmode);
6507 rtx sreg = force_reg (SFmode, element0);
6508 rtx cvt = (TARGET_XSCVDPSPN
6509 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6510 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6512 emit_insn (cvt);
6513 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6514 const0_rtx));
6517 else
6519 rtx dbl_even = gen_reg_rtx (V2DFmode);
6520 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6521 rtx flt_even = gen_reg_rtx (V4SFmode);
6522 rtx flt_odd = gen_reg_rtx (V4SFmode);
6523 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6524 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6525 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6526 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6528 /* Use VMRGEW if we can instead of doing a permute. */
6529 if (TARGET_P8_VECTOR)
6531 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6532 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6533 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6534 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6535 if (BYTES_BIG_ENDIAN)
6536 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6537 else
6538 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6540 else
6542 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6543 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6544 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6545 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6546 rs6000_expand_extract_even (target, flt_even, flt_odd);
6549 return;
6552 /* Special case initializing vector short/char that are splats if we are on
6553 64-bit systems with direct move. */
6554 if (all_same && TARGET_DIRECT_MOVE_64BIT
6555 && (mode == V16QImode || mode == V8HImode))
6557 rtx op0 = XVECEXP (vals, 0, 0);
6558 rtx di_tmp = gen_reg_rtx (DImode);
6560 if (!REG_P (op0))
6561 op0 = force_reg (GET_MODE_INNER (mode), op0);
6563 if (mode == V16QImode)
6565 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6566 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6567 return;
6570 if (mode == V8HImode)
6572 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6573 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6574 return;
6578 /* Store value to stack temp. Load vector element. Splat. However, splat
6579 of 64-bit items is not supported on Altivec. */
6580 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6582 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6583 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6584 XVECEXP (vals, 0, 0));
6585 x = gen_rtx_UNSPEC (VOIDmode,
6586 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6587 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6588 gen_rtvec (2,
6589 gen_rtx_SET (target, mem),
6590 x)));
6591 x = gen_rtx_VEC_SELECT (inner_mode, target,
6592 gen_rtx_PARALLEL (VOIDmode,
6593 gen_rtvec (1, const0_rtx)));
6594 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6595 return;
6598 /* One field is non-constant. Load constant then overwrite
6599 varying field. */
6600 if (n_var == 1)
6602 rtx copy = copy_rtx (vals);
6604 /* Load constant part of vector, substitute neighboring value for
6605 varying element. */
6606 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6607 rs6000_expand_vector_init (target, copy);
6609 /* Insert variable. */
6610 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6611 return;
6614 /* Construct the vector in memory one field at a time
6615 and load the whole vector. */
6616 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6617 for (i = 0; i < n_elts; i++)
6618 emit_move_insn (adjust_address_nv (mem, inner_mode,
6619 i * GET_MODE_SIZE (inner_mode)),
6620 XVECEXP (vals, 0, i));
6621 emit_move_insn (target, mem);
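/* E.g., a V4SImode init from four unrelated variables on a target without
   64-bit direct move falls through to here: four scalar stores into the
   stack temporary, then one vector load.  */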
6624 /* Set field ELT of TARGET to VAL. */
6626 void
6627 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6629 machine_mode mode = GET_MODE (target);
6630 machine_mode inner_mode = GET_MODE_INNER (mode);
6631 rtx reg = gen_reg_rtx (mode);
6632 rtx mask, mem, x;
6633 int width = GET_MODE_SIZE (inner_mode);
6634 int i;
6636 val = force_reg (GET_MODE (val), val);
6638 if (VECTOR_MEM_VSX_P (mode))
6640 rtx insn = NULL_RTX;
6641 rtx elt_rtx = GEN_INT (elt);
6643 if (mode == V2DFmode)
6644 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6646 else if (mode == V2DImode)
6647 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6649 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6651 if (mode == V4SImode)
6652 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6653 else if (mode == V8HImode)
6654 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6655 else if (mode == V16QImode)
6656 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6657 else if (mode == V4SFmode)
6658 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6661 if (insn)
6663 emit_insn (insn);
6664 return;
6668 /* Simplify setting single element vectors like V1TImode. */
6669 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6671 emit_move_insn (target, gen_lowpart (mode, val));
6672 return;
6675 /* Load single variable value. */
6676 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6677 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6678 x = gen_rtx_UNSPEC (VOIDmode,
6679 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6680 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6681 gen_rtvec (2,
6682 gen_rtx_SET (reg, mem),
6683 x)));
6685 /* Linear sequence. */
6686 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6687 for (i = 0; i < 16; ++i)
6688 XVECEXP (mask, 0, i) = GEN_INT (i);
6690 /* Set permute mask to insert element into target. */
6691 for (i = 0; i < width; ++i)
6692 XVECEXP (mask, 0, elt*width + i)
6693 = GEN_INT (i + 0x10);
6694 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6696 if (BYTES_BIG_ENDIAN)
6697 x = gen_rtx_UNSPEC (mode,
6698 gen_rtvec (3, target, reg,
6699 force_reg (V16QImode, x)),
6700 UNSPEC_VPERM);
6701 else
6703 if (TARGET_P9_VECTOR)
6704 x = gen_rtx_UNSPEC (mode,
6705 gen_rtvec (3, reg, target,
6706 force_reg (V16QImode, x)),
6707 UNSPEC_VPERMR);
6708 else
6710 /* Invert selector. We prefer to generate VNAND on P8 so
6711 that future fusion opportunities can kick in, but must
6712 generate VNOR elsewhere. */
6713 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6714 rtx iorx = (TARGET_P8_VECTOR
6715 ? gen_rtx_IOR (V16QImode, notx, notx)
6716 : gen_rtx_AND (V16QImode, notx, notx));
6717 rtx tmp = gen_reg_rtx (V16QImode);
6718 emit_insn (gen_rtx_SET (tmp, iorx));
6720 /* Permute with operands reversed and adjusted selector. */
6721 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6722 UNSPEC_VPERM);
6726 emit_insn (gen_rtx_SET (target, x));
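/* Worked example (big endian): inserting into element 1 of a V4SImode
   vector (width 4) rewrites selector bytes 4..7 to 0x14..0x17, so the
   VPERM keeps TARGET's bytes 0-3 and 8-15 and takes element 1 from the
   first four bytes of REG, which hold the new value.  */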
6729 /* Extract field ELT from VEC into TARGET. */
6731 void
6732 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6734 machine_mode mode = GET_MODE (vec);
6735 machine_mode inner_mode = GET_MODE_INNER (mode);
6736 rtx mem;
6738 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6740 switch (mode)
6742 default:
6743 break;
6744 case E_V1TImode:
6745 emit_move_insn (target, gen_lowpart (TImode, vec));
6746 break;
6747 case E_V2DFmode:
6748 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6749 return;
6750 case E_V2DImode:
6751 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6752 return;
6753 case E_V4SFmode:
6754 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6755 return;
6756 case E_V16QImode:
6757 if (TARGET_DIRECT_MOVE_64BIT)
6759 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6760 return;
6762 else
6763 break;
6764 case E_V8HImode:
6765 if (TARGET_DIRECT_MOVE_64BIT)
6767 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6768 return;
6770 else
6771 break;
6772 case E_V4SImode:
6773 if (TARGET_DIRECT_MOVE_64BIT)
6775 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6776 return;
6778 break;
6781 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6782 && TARGET_DIRECT_MOVE_64BIT)
6784 if (GET_MODE (elt) != DImode)
6786 rtx tmp = gen_reg_rtx (DImode);
6787 convert_move (tmp, elt, 0);
6788 elt = tmp;
6790 else if (!REG_P (elt))
6791 elt = force_reg (DImode, elt);
6793 switch (mode)
6795 case E_V1TImode:
6796 emit_move_insn (target, gen_lowpart (TImode, vec));
6797 return;
6799 case E_V2DFmode:
6800 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6801 return;
6803 case E_V2DImode:
6804 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6805 return;
6807 case E_V4SFmode:
6808 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6809 return;
6811 case E_V4SImode:
6812 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6813 return;
6815 case E_V8HImode:
6816 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6817 return;
6819 case E_V16QImode:
6820 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6821 return;
6823 default:
6824 gcc_unreachable ();
6828 /* Allocate mode-sized buffer. */
6829 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6831 emit_move_insn (mem, vec);
6832 if (CONST_INT_P (elt))
6834 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6836 /* Add offset to field within buffer matching vector element. */
6837 mem = adjust_address_nv (mem, inner_mode,
6838 modulo_elt * GET_MODE_SIZE (inner_mode));
6839 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6841 else
6843 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6844 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6845 rtx new_addr = gen_reg_rtx (Pmode);
6847 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6848 if (ele_size > 1)
6849 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6850 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6851 new_addr = change_address (mem, inner_mode, new_addr);
6852 emit_move_insn (target, new_addr);
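/* E.g., a variable-index extract from V8HImode via memory: the vector is
   spilled to the stack, the index is masked with 7, scaled by 2 (the
   element size), and the halfword at that offset is loaded.  */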
6856 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6857 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6858 temporary (BASE_TMP) to fix up the address. Return the new memory address
6859 that is valid for reads or writes to a given register (SCALAR_REG). */
6861 rtx
6862 rs6000_adjust_vec_address (rtx scalar_reg,
6863 rtx mem,
6864 rtx element,
6865 rtx base_tmp,
6866 machine_mode scalar_mode)
6868 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6869 rtx addr = XEXP (mem, 0);
6870 rtx element_offset;
6871 rtx new_addr;
6872 bool valid_addr_p;
6874 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6875 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6877 /* Calculate what we need to add to the address to get the element
6878 address. */
6879 if (CONST_INT_P (element))
6880 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6881 else
6883 int byte_shift = exact_log2 (scalar_size);
6884 gcc_assert (byte_shift >= 0);
6886 if (byte_shift == 0)
6887 element_offset = element;
6889 else
6891 if (TARGET_POWERPC64)
6892 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6893 else
6894 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6896 element_offset = base_tmp;
6900 /* Create the new address pointing to the element within the vector. If we
6901 are adding 0, we don't have to change the address. */
6902 if (element_offset == const0_rtx)
6903 new_addr = addr;
6905 /* A simple indirect address can be converted into a reg + offset
6906 address. */
6907 else if (REG_P (addr) || SUBREG_P (addr))
6908 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
6910 /* Optimize D-FORM addresses with a constant offset and a constant element, to
6911 include the element offset in the address directly. */
6912 else if (GET_CODE (addr) == PLUS)
6914 rtx op0 = XEXP (addr, 0);
6915 rtx op1 = XEXP (addr, 1);
6916 rtx insn;
6918 gcc_assert (REG_P (op0) || SUBREG_P (op0));
6919 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
6921 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
6922 rtx offset_rtx = GEN_INT (offset);
6924 if (IN_RANGE (offset, -32768, 32767)
6925 && (scalar_size < 8 || (offset & 0x3) == 0))
6926 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
6927 else
6929 emit_move_insn (base_tmp, offset_rtx);
6930 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
6933 else
6935 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
6936 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
6938 /* Note, ADDI requires the register being added to be a base
6939 register. If the register was R0, load it up into the temporary
6940 and do the add. */
6941 if (op1_reg_p
6942 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
6944 insn = gen_add3_insn (base_tmp, op1, element_offset);
6945 gcc_assert (insn != NULL_RTX);
6946 emit_insn (insn);
6949 else if (ele_reg_p
6950 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
6952 insn = gen_add3_insn (base_tmp, element_offset, op1);
6953 gcc_assert (insn != NULL_RTX);
6954 emit_insn (insn);
6957 else
6959 emit_move_insn (base_tmp, op1);
6960 emit_insn (gen_add2_insn (base_tmp, element_offset));
6963 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
6967 else
6969 emit_move_insn (base_tmp, addr);
6970 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
6973 /* If we have a PLUS, we need to see whether the particular register class
6974 allows for D-FORM or X-FORM addressing. */
6975 if (GET_CODE (new_addr) == PLUS)
6977 rtx op1 = XEXP (new_addr, 1);
6978 addr_mask_type addr_mask;
6979 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
6981 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
6982 if (INT_REGNO_P (scalar_regno))
6983 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
6985 else if (FP_REGNO_P (scalar_regno))
6986 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
6988 else if (ALTIVEC_REGNO_P (scalar_regno))
6989 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
6991 else
6992 gcc_unreachable ();
6994 if (REG_P (op1) || SUBREG_P (op1))
6995 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
6996 else
6997 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7000 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7001 valid_addr_p = true;
7003 else
7004 valid_addr_p = false;
7006 if (!valid_addr_p)
7008 emit_move_insn (base_tmp, new_addr);
7009 new_addr = base_tmp;
7012 return change_address (mem, scalar_mode, new_addr);
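/* Worked example: extracting constant element 3 of a V4SImode vector whose
   address is base + 16 folds the offsets: 16 + 3*4 = 28 fits a signed
   16-bit displacement, so the new address is simply base + 28.  */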
7015 /* Split a variable vec_extract operation into the component instructions. */
7017 void
7018 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7019 rtx tmp_altivec)
7021 machine_mode mode = GET_MODE (src);
7022 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7023 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7024 int byte_shift = exact_log2 (scalar_size);
7026 gcc_assert (byte_shift >= 0);
7028 /* If we are given a memory address, optimize to load just the element. We
7029 don't have to adjust the vector element number on little endian
7030 systems. */
7031 if (MEM_P (src))
7033 int num_elements = GET_MODE_NUNITS (mode);
7034 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7036 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7037 gcc_assert (REG_P (tmp_gpr));
7038 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7039 tmp_gpr, scalar_mode));
7040 return;
7043 else if (REG_P (src) || SUBREG_P (src))
7045 int num_elements = GET_MODE_NUNITS (mode);
7046 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7047 int bit_shift = 7 - exact_log2 (num_elements);
7048 rtx element2;
7049 unsigned int dest_regno = reg_or_subregno (dest);
7050 unsigned int src_regno = reg_or_subregno (src);
7051 unsigned int element_regno = reg_or_subregno (element);
7053 gcc_assert (REG_P (tmp_gpr));
7055 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7056 a general purpose register. */
7057 if (TARGET_P9_VECTOR
7058 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7059 && INT_REGNO_P (dest_regno)
7060 && ALTIVEC_REGNO_P (src_regno)
7061 && INT_REGNO_P (element_regno))
7063 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7064 rtx element_si = gen_rtx_REG (SImode, element_regno);
7066 if (mode == V16QImode)
7067 emit_insn (BYTES_BIG_ENDIAN
7068 ? gen_vextublx (dest_si, element_si, src)
7069 : gen_vextubrx (dest_si, element_si, src));
7071 else if (mode == V8HImode)
7073 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7074 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7075 emit_insn (BYTES_BIG_ENDIAN
7076 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7077 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7081 else
7083 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7084 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7085 emit_insn (BYTES_BIG_ENDIAN
7086 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7087 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7090 return;
7094 gcc_assert (REG_P (tmp_altivec));
7096 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7097 an XOR, otherwise we need to subtract. The shift amount is chosen so
7098 that VSLO will shift the element into the upper position (adding 3 to
7099 convert a byte shift into a bit shift). */
7100 if (scalar_size == 8)
7102 if (!BYTES_BIG_ENDIAN)
7104 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7105 element2 = tmp_gpr;
7107 else
7108 element2 = element;
7110 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7111 bit. */
7112 emit_insn (gen_rtx_SET (tmp_gpr,
7113 gen_rtx_AND (DImode,
7114 gen_rtx_ASHIFT (DImode,
7115 element2,
7116 GEN_INT (6)),
7117 GEN_INT (64))));
7119 else
7121 if (!BYTES_BIG_ENDIAN)
7123 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7125 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7126 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7127 element2 = tmp_gpr;
7129 else
7130 element2 = element;
7132 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7135 /* Get the value into the lower byte of the Altivec register where VSLO
7136 expects it. */
7137 if (TARGET_P9_VECTOR)
7138 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7139 else if (can_create_pseudo_p ())
7140 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7141 else
7143 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7144 emit_move_insn (tmp_di, tmp_gpr);
7145 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7148 /* Do the VSLO to get the value into the final location. */
7149 switch (mode)
7151 case E_V2DFmode:
7152 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7153 return;
7155 case E_V2DImode:
7156 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7157 return;
7159 case E_V4SFmode:
7161 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7162 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7163 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7164 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7165 tmp_altivec));
7167 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7168 return;
7171 case E_V4SImode:
7172 case E_V8HImode:
7173 case E_V16QImode:
7175 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7176 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7177 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7178 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7179 tmp_altivec));
7180 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7181 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7182 GEN_INT (64 - bits_in_element)));
7183 return;
7186 default:
7187 gcc_unreachable ();
7190 return;
7192 else
7193 gcc_unreachable ();
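/* E.g., for V2DFmode on little endian the XOR with 1 above swaps element
   numbers 0 and 1 to match the reversed storage order; the shift left by 6
   then turns the element number into e*64 = (e*8 bytes) << 3, the encoding
   VSLO expects.  */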
7196 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7197 selects whether the alignment is abi mandated, optional, or
7198 both abi and optional alignment. */
7200 unsigned int
7201 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7203 if (how != align_opt)
7205 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7206 align = 128;
7209 if (how != align_abi)
7211 if (TREE_CODE (type) == ARRAY_TYPE
7212 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7214 if (align < BITS_PER_WORD)
7215 align = BITS_PER_WORD;
7219 return align;
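/* Illustrative example (hypothetical type nodes, not taken from the code
   above): with HOW == align_abi a V4SI vector type is raised to 128-bit
   alignment, while with HOW == align_opt a char array is raised to
   BITS_PER_WORD for copy efficiency:

     rs6000_data_alignment (v4si_type, 32, align_abi)     => 128
     rs6000_data_alignment (char_array_type, 8, align_opt) => BITS_PER_WORD  */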
7222 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7223 instructions simply ignore the low bits; VSX memory instructions
7224 are aligned to 4 or 8 bytes. */
7226 static bool
7227 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7229 return (STRICT_ALIGNMENT
7230 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7231 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7232 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7233 && (int) align < VECTOR_ALIGN (mode)))));
7236 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7238 bool
7239 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7241 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7243 if (computed != 128)
7245 static bool warned;
7246 if (!warned && warn_psabi)
7248 warned = true;
7249 inform (input_location,
7250 "the layout of aggregates containing vectors with"
7251 " %d-byte alignment has changed in GCC 5",
7252 computed / BITS_PER_UNIT);
7255 /* In current GCC there is no special case. */
7256 return false;
7259 return false;
7262 /* AIX increases natural record alignment to doubleword if the first
7263 field is an FP double while the FP fields remain word aligned. */
7265 unsigned int
7266 rs6000_special_round_type_align (tree type, unsigned int computed,
7267 unsigned int specified)
7269 unsigned int align = MAX (computed, specified);
7270 tree field = TYPE_FIELDS (type);
7272 /* Skip all non-FIELD_DECL nodes.  */
7273 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7274 field = DECL_CHAIN (field);
7276 if (field != NULL && field != type)
7278 type = TREE_TYPE (field);
7279 while (TREE_CODE (type) == ARRAY_TYPE)
7280 type = TREE_TYPE (type);
7282 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7283 align = MAX (align, 64);
7286 return align;
7289 /* Darwin increases record alignment to the natural alignment of
7290 the first field. */
7292 unsigned int
7293 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7294 unsigned int specified)
7296 unsigned int align = MAX (computed, specified);
7298 if (TYPE_PACKED (type))
7299 return align;
7301 /* Find the first field, looking down into aggregates. */
7302 do {
7303 tree field = TYPE_FIELDS (type);
7304 /* Skip all non-FIELD_DECL nodes.  */
7305 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7306 field = DECL_CHAIN (field);
7307 if (! field)
7308 break;
7309 /* A packed field does not contribute any extra alignment. */
7310 if (DECL_PACKED (field))
7311 return align;
7312 type = TREE_TYPE (field);
7313 while (TREE_CODE (type) == ARRAY_TYPE)
7314 type = TREE_TYPE (type);
7315 } while (AGGREGATE_TYPE_P (type));
7317 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7318 align = MAX (align, TYPE_ALIGN (type));
7320 return align;
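/* Sketch of the effect (hypothetical type): for

     struct S { struct { double d; } inner; char c; };

   the loop descends through "inner" to the double, so the record is
   raised to at least 64-bit alignment unless TYPE_PACKED or a packed
   field stops the walk first.  */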
7323 /* Return 1 for an operand in small memory on V.4/eabi. */
7326 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7327 machine_mode mode ATTRIBUTE_UNUSED)
7329 #if TARGET_ELF
7330 rtx sym_ref;
7332 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7333 return 0;
7335 if (DEFAULT_ABI != ABI_V4)
7336 return 0;
7338 if (SYMBOL_REF_P (op))
7339 sym_ref = op;
7341 else if (GET_CODE (op) != CONST
7342 || GET_CODE (XEXP (op, 0)) != PLUS
7343 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7344 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7345 return 0;
7347 else
7349 rtx sum = XEXP (op, 0);
7350 HOST_WIDE_INT summand;
7352 /* We have to be careful here, because it is the referenced address
7353 that must be 32k from _SDA_BASE_, not just the symbol. */
7354 summand = INTVAL (XEXP (sum, 1));
7355 if (summand < 0 || summand > g_switch_value)
7356 return 0;
7358 sym_ref = XEXP (sum, 0);
7361 return SYMBOL_REF_SMALL_P (sym_ref);
7362 #else
7363 return 0;
7364 #endif
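/* Worked example (assuming a V.4/eabi target with small data enabled and
   -G 8): for OP == (const (plus (symbol_ref "x") (const_int 4))), the
   summand 4 must lie in [0, g_switch_value] and "x" must satisfy
   SYMBOL_REF_SMALL_P.  The summand is checked because the referenced
   address, not just the symbol, must stay within 32k of _SDA_BASE_.  */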
7367 /* Return true if either operand is a general purpose register. */
7369 bool
7370 gpr_or_gpr_p (rtx op0, rtx op1)
7372 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7373 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7376 /* Return true if this is a move direct operation between GPR registers and
7377 floating point/VSX registers. */
7379 bool
7380 direct_move_p (rtx op0, rtx op1)
7382 if (!REG_P (op0) || !REG_P (op1))
7383 return false;
7385 if (!TARGET_DIRECT_MOVE)
7386 return false;
7388 int regno0 = REGNO (op0);
7389 int regno1 = REGNO (op1);
7390 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7391 return false;
7393 if (INT_REGNO_P (regno0) && VSX_REGNO_P (regno1))
7394 return true;
7396 if (VSX_REGNO_P (regno0) && INT_REGNO_P (regno1))
7397 return true;
7399 return false;
7402 /* Return true if ADDR is an acceptable address for a quad memory
7403    operation of mode MODE (either LQ/STQ for general purpose registers, or
7404    LXV/STXV for vector registers under ISA 3.0).  If STRICT, require the
7405    base register to pass the strict legitimacy check (hard registers
7406    only); otherwise pseudo registers are also accepted.  */
7408 bool
7409 quad_address_p (rtx addr, machine_mode mode, bool strict)
7411 rtx op0, op1;
7413 if (GET_MODE_SIZE (mode) != 16)
7414 return false;
7416 if (legitimate_indirect_address_p (addr, strict))
7417 return true;
7419 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7420 return false;
7422 if (GET_CODE (addr) != PLUS)
7423 return false;
7425 op0 = XEXP (addr, 0);
7426 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7427 return false;
7429 op1 = XEXP (addr, 1);
7430 if (!CONST_INT_P (op1))
7431 return false;
7433 return quad_address_offset_p (INTVAL (op1));
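/* For instance (illustrative RTL), with a DQ-form mode:

     (plus (reg 3) (const_int 32))   -- acceptable, 16-byte aligned offset
     (plus (reg 3) (const_int 17))   -- rejected by quad_address_offset_p

   since LXV/STXV encode the displacement in a scaled DQ field that can
   only express multiples of 16.  */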
7436 /* Return true if this is a load or store quad operation. This function does
7437 not handle the atomic quad memory instructions. */
7439 bool
7440 quad_load_store_p (rtx op0, rtx op1)
7442 bool ret;
7444 if (!TARGET_QUAD_MEMORY)
7445 ret = false;
7447 else if (REG_P (op0) && MEM_P (op1))
7448 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7449 && quad_memory_operand (op1, GET_MODE (op1))
7450 && !reg_overlap_mentioned_p (op0, op1));
7452 else if (MEM_P (op0) && REG_P (op1))
7453 ret = (quad_memory_operand (op0, GET_MODE (op0))
7454 && quad_int_reg_operand (op1, GET_MODE (op1)));
7456 else
7457 ret = false;
7459 if (TARGET_DEBUG_ADDR)
7461 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7462 ret ? "true" : "false");
7463 debug_rtx (gen_rtx_SET (op0, op1));
7466 return ret;
7469 /* Given an address, return a constant offset term if one exists. */
7471 static rtx
7472 address_offset (rtx op)
7474 if (GET_CODE (op) == PRE_INC
7475 || GET_CODE (op) == PRE_DEC)
7476 op = XEXP (op, 0);
7477 else if (GET_CODE (op) == PRE_MODIFY
7478 || GET_CODE (op) == LO_SUM)
7479 op = XEXP (op, 1);
7481 if (GET_CODE (op) == CONST)
7482 op = XEXP (op, 0);
7484 if (GET_CODE (op) == PLUS)
7485 op = XEXP (op, 1);
7487 if (CONST_INT_P (op))
7488 return op;
7490 return NULL_RTX;
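/* Examples of what the walk above extracts:

     (plus (reg 3) (const_int 8))                        => (const_int 8)
     (lo_sum (reg 3) (symbol_ref "x"))                   => NULL_RTX
     (pre_modify (reg 1) (plus (reg 1) (const_int -16))) => (const_int -16)  */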
7493 /* Return true if the MEM operand is a memory operand suitable for use
7494 with a (full width, possibly multiple) gpr load/store. On
7495 powerpc64 this means the offset must be divisible by 4.
7496 Implements 'Y' constraint.
7498 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7499 a constraint function we know the operand has satisfied a suitable
7500 memory predicate.
7502 Offsetting a lo_sum should not be allowed, except where we know by
7503 alignment that a 32k boundary is not crossed. Note that by
7504 "offsetting" here we mean a further offset to access parts of the
7505 MEM. It's fine to have a lo_sum where the inner address is offset
7506 from a sym, since the same sym+offset will appear in the high part
7507 of the address calculation. */
7509 bool
7510 mem_operand_gpr (rtx op, machine_mode mode)
7512 unsigned HOST_WIDE_INT offset;
7513 int extra;
7514 rtx addr = XEXP (op, 0);
7516 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7517 if (TARGET_UPDATE
7518 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7519 && mode_supports_pre_incdec_p (mode)
7520 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7521 return true;
7523 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7524 if (!rs6000_offsettable_memref_p (op, mode, false))
7525 return false;
7527 op = address_offset (addr);
7528 if (op == NULL_RTX)
7529 return true;
7531 offset = INTVAL (op);
7532 if (TARGET_POWERPC64 && (offset & 3) != 0)
7533 return false;
7535 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7536 if (extra < 0)
7537 extra = 0;
7539 if (GET_CODE (addr) == LO_SUM)
7540 /* For lo_sum addresses, we must allow any offset except one that
7541 causes a wrap, so test only the low 16 bits. */
7542 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7544 return offset + 0x8000 < 0x10000u - extra;
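/* Worked range check: on powerpc64 a DFmode access (extra == 0) with
   offset 32764 passes, since 32764 + 0x8000 == 65532 < 0x10000; a
   16-byte access (extra == 8) at the same offset fails because
   65532 >= 0x10000 - 8, i.e. its last word would need a displacement
   beyond the signed 16-bit range.  */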
7547 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7548 enforce an offset divisible by 4 even for 32-bit. */
7550 bool
7551 mem_operand_ds_form (rtx op, machine_mode mode)
7553 unsigned HOST_WIDE_INT offset;
7554 int extra;
7555 rtx addr = XEXP (op, 0);
7557 if (!offsettable_address_p (false, mode, addr))
7558 return false;
7560 op = address_offset (addr);
7561 if (op == NULL_RTX)
7562 return true;
7564 offset = INTVAL (op);
7565 if ((offset & 3) != 0)
7566 return false;
7568 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7569 if (extra < 0)
7570 extra = 0;
7572 if (GET_CODE (addr) == LO_SUM)
7573 /* For lo_sum addresses, we must allow any offset except one that
7574 causes a wrap, so test only the low 16 bits. */
7575 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7577 return offset + 0x8000 < 0x10000u - extra;
7580 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7582 static bool
7583 reg_offset_addressing_ok_p (machine_mode mode)
7585 switch (mode)
7587 case E_V16QImode:
7588 case E_V8HImode:
7589 case E_V4SFmode:
7590 case E_V4SImode:
7591 case E_V2DFmode:
7592 case E_V2DImode:
7593 case E_V1TImode:
7594 case E_TImode:
7595 case E_TFmode:
7596 case E_KFmode:
7597 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7598 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7599 a vector mode, if we want to use the VSX registers to move it around,
7600 we need to restrict ourselves to reg+reg addressing. Similarly for
7601 IEEE 128-bit floating point that is passed in a single vector
7602 register. */
7603 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7604 return mode_supports_dq_form (mode);
7605 break;
7607 case E_SDmode:
7608 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7609 addressing for the LFIWZX and STFIWX instructions. */
7610 if (TARGET_NO_SDMODE_STACK)
7611 return false;
7612 break;
7614 default:
7615 break;
7618 return true;
7621 static bool
7622 virtual_stack_registers_memory_p (rtx op)
7624 int regnum;
7626 if (REG_P (op))
7627 regnum = REGNO (op);
7629 else if (GET_CODE (op) == PLUS
7630 && REG_P (XEXP (op, 0))
7631 && CONST_INT_P (XEXP (op, 1)))
7632 regnum = REGNO (XEXP (op, 0));
7634 else
7635 return false;
7637 return (regnum >= FIRST_VIRTUAL_REGISTER
7638 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7641 /* Return true if a MODE sized memory access to OP plus OFFSET
7642 is known not to straddle a 32k boundary.  This function is used
7643 to determine whether -mcmodel=medium code can use TOC pointer
7644 relative addressing for OP. This means the alignment of the TOC
7645 pointer must also be taken into account, and unfortunately that is
7646 only 8 bytes. */
7648 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7649 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7650 #endif
7652 static bool
7653 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7654 machine_mode mode)
7656 tree decl;
7657 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7659 if (!SYMBOL_REF_P (op))
7660 return false;
7662 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7663 SYMBOL_REF. */
7664 if (mode_supports_dq_form (mode))
7665 return false;
7667 dsize = GET_MODE_SIZE (mode);
7668 decl = SYMBOL_REF_DECL (op);
7669 if (!decl)
7671 if (dsize == 0)
7672 return false;
7674 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7675 replacing memory addresses with an anchor plus offset. We
7676 could find the decl by rummaging around in the block->objects
7677 VEC for the given offset but that seems like too much work. */
7678 dalign = BITS_PER_UNIT;
7679 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7680 && SYMBOL_REF_ANCHOR_P (op)
7681 && SYMBOL_REF_BLOCK (op) != NULL)
7683 struct object_block *block = SYMBOL_REF_BLOCK (op);
7685 dalign = block->alignment;
7686 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7688 else if (CONSTANT_POOL_ADDRESS_P (op))
7690 /* It would be nice to have get_pool_align ()...  */
7691 machine_mode cmode = get_pool_mode (op);
7693 dalign = GET_MODE_ALIGNMENT (cmode);
7696 else if (DECL_P (decl))
7698 dalign = DECL_ALIGN (decl);
7700 if (dsize == 0)
7702 /* Allow BLKmode when the entire object is known to not
7703 cross a 32k boundary. */
7704 if (!DECL_SIZE_UNIT (decl))
7705 return false;
7707 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7708 return false;
7710 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7711 if (dsize > 32768)
7712 return false;
7714 dalign /= BITS_PER_UNIT;
7715 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7716 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7717 return dalign >= dsize;
7720 else
7721 gcc_unreachable ();
7723 /* Find how many bits of the alignment we know for this access. */
7724 dalign /= BITS_PER_UNIT;
7725 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7726 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7727 mask = dalign - 1;
7728 lsb = offset & -offset;
7729 mask &= lsb - 1;
7730 dalign = mask + 1;
7732 return dalign >= dsize;
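/* Numeric sketch of the lsb/mask trick above: with the TOC guarantee
   dalign == 8 and offset == 20, lsb == (20 & -20) == 4 and mask becomes
   (8 - 1) & (4 - 1) == 3, so the known alignment drops to 4 bytes; a
   4-byte access is then accepted and an 8-byte one is not.  */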
7735 static bool
7736 constant_pool_expr_p (rtx op)
7738 rtx base, offset;
7740 split_const (op, &base, &offset);
7741 return (SYMBOL_REF_P (base)
7742 && CONSTANT_POOL_ADDRESS_P (base)
7743 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7746 /* Create a TOC reference for symbol_ref SYMBOL. If LARGETOC_REG is non-null,
7747 use that as the register to put the HIGH value into if register allocation
7748 is already done. */
7751 create_TOC_reference (rtx symbol, rtx largetoc_reg)
7753 rtx tocrel, tocreg, hi;
7755 gcc_assert (TARGET_TOC);
7757 if (TARGET_DEBUG_ADDR)
7759 if (SYMBOL_REF_P (symbol))
7760 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
7761 XSTR (symbol, 0));
7762 else
7764 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
7765 GET_RTX_NAME (GET_CODE (symbol)));
7766 debug_rtx (symbol);
7770 if (!can_create_pseudo_p ())
7771 df_set_regs_ever_live (TOC_REGISTER, true);
7773 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
7774 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
7775 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
7776 return tocrel;
7778 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
7779 if (largetoc_reg != NULL)
7781 emit_move_insn (largetoc_reg, hi);
7782 hi = largetoc_reg;
7784 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
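/* Sketch of the results (not exhaustive): for -mcmodel=small, or any
   time pseudos may still be created, the bare

     (unspec [(symbol_ref) (reg 2)] UNSPEC_TOCREL)

   is returned; otherwise it is wrapped as (lo_sum (high ...) ...) so the
   high part can be set up separately, optionally in LARGETOC_REG.  */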
7787 /* These are only used to pass through from print_operand/print_operand_address
7788 to rs6000_output_addr_const_extra over the intervening function
7789 output_addr_const which is not target code. */
7790 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7792 /* Return true if OP is a toc pointer relative address (the output
7793 of create_TOC_reference). If STRICT, do not match non-split
7794 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7795 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7796 TOCREL_OFFSET_RET respectively. */
7798 bool
7799 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7800 const_rtx *tocrel_offset_ret)
7802 if (!TARGET_TOC)
7803 return false;
7805 if (TARGET_CMODEL != CMODEL_SMALL)
7807 /* When strict, ensure we have everything tidy.  */
7808 if (strict
7809 && !(GET_CODE (op) == LO_SUM
7810 && REG_P (XEXP (op, 0))
7811 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7812 return false;
7814 /* When not strict, allow non-split TOC addresses and also allow
7815 (lo_sum (high ..)) TOC addresses created during reload. */
7816 if (GET_CODE (op) == LO_SUM)
7817 op = XEXP (op, 1);
7820 const_rtx tocrel_base = op;
7821 const_rtx tocrel_offset = const0_rtx;
7823 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7825 tocrel_base = XEXP (op, 0);
7826 tocrel_offset = XEXP (op, 1);
7829 if (tocrel_base_ret)
7830 *tocrel_base_ret = tocrel_base;
7831 if (tocrel_offset_ret)
7832 *tocrel_offset_ret = tocrel_offset;
7834 return (GET_CODE (tocrel_base) == UNSPEC
7835 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7836 && REG_P (XVECEXP (tocrel_base, 0, 1))
7837 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7840 /* Return true if X is a constant pool address, and also for cmodel=medium
7841 if X is a toc-relative address known to be offsettable within MODE. */
7843 bool
7844 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7845 bool strict)
7847 const_rtx tocrel_base, tocrel_offset;
7848 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7849 && (TARGET_CMODEL != CMODEL_MEDIUM
7850 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7851 || mode == QImode
7852 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7853 INTVAL (tocrel_offset), mode)));
7856 static bool
7857 legitimate_small_data_p (machine_mode mode, rtx x)
7859 return (DEFAULT_ABI == ABI_V4
7860 && !flag_pic && !TARGET_TOC
7861 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7862 && small_data_operand (x, mode));
7865 bool
7866 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7867 bool strict, bool worst_case)
7869 unsigned HOST_WIDE_INT offset;
7870 unsigned int extra;
7872 if (GET_CODE (x) != PLUS)
7873 return false;
7874 if (!REG_P (XEXP (x, 0)))
7875 return false;
7876 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7877 return false;
7878 if (mode_supports_dq_form (mode))
7879 return quad_address_p (x, mode, strict);
7880 if (!reg_offset_addressing_ok_p (mode))
7881 return virtual_stack_registers_memory_p (x);
7882 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7883 return true;
7884 if (!CONST_INT_P (XEXP (x, 1)))
7885 return false;
7887 offset = INTVAL (XEXP (x, 1));
7888 extra = 0;
7889 switch (mode)
7891 case E_DFmode:
7892 case E_DDmode:
7893 case E_DImode:
7894 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7895 addressing. */
7896 if (VECTOR_MEM_VSX_P (mode))
7897 return false;
7899 if (!worst_case)
7900 break;
7901 if (!TARGET_POWERPC64)
7902 extra = 4;
7903 else if (offset & 3)
7904 return false;
7905 break;
7907 case E_TFmode:
7908 case E_IFmode:
7909 case E_KFmode:
7910 case E_TDmode:
7911 case E_TImode:
7912 case E_PTImode:
7913 extra = 8;
7914 if (!worst_case)
7915 break;
7916 if (!TARGET_POWERPC64)
7917 extra = 12;
7918 else if (offset & 3)
7919 return false;
7920 break;
7922 default:
7923 break;
7926 offset += 0x8000;
7927 return offset < 0x10000 - extra;
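/* Worked example for the worst case: TFmode on 32-bit sets extra to 12,
   so offset 32752 passes (32752 + 0x8000 == 65520 < 65524) while 32756
   fails; the slack guarantees each word of the 16-byte value is still
   reachable from the base register.  */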
7930 bool
7931 legitimate_indexed_address_p (rtx x, int strict)
7933 rtx op0, op1;
7935 if (GET_CODE (x) != PLUS)
7936 return false;
7938 op0 = XEXP (x, 0);
7939 op1 = XEXP (x, 1);
7941 return (REG_P (op0) && REG_P (op1)
7942 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
7943 && INT_REG_OK_FOR_INDEX_P (op1, strict))
7944 || (INT_REG_OK_FOR_BASE_P (op1, strict)
7945 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
7948 bool
7949 avoiding_indexed_address_p (machine_mode mode)
7951 /* Avoid indexed addressing for modes that have non-indexed
7952 load/store instruction forms. */
7953 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
7956 bool
7957 legitimate_indirect_address_p (rtx x, int strict)
7959 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
7962 bool
7963 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
7965 if (!TARGET_MACHO || !flag_pic
7966 || mode != SImode || !MEM_P (x))
7967 return false;
7968 x = XEXP (x, 0);
7970 if (GET_CODE (x) != LO_SUM)
7971 return false;
7972 if (!REG_P (XEXP (x, 0)))
7973 return false;
7974 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
7975 return false;
7976 x = XEXP (x, 1);
7978 return CONSTANT_P (x);
7981 static bool
7982 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
7984 if (GET_CODE (x) != LO_SUM)
7985 return false;
7986 if (!REG_P (XEXP (x, 0)))
7987 return false;
7988 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7989 return false;
7990 /* Quad word addresses are restricted, and we can't use LO_SUM.  */
7991 if (mode_supports_dq_form (mode))
7992 return false;
7993 x = XEXP (x, 1);
7995 if (TARGET_ELF || TARGET_MACHO)
7997 bool large_toc_ok;
7999 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8000 return false;
8001 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8002 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8003 recognizes some LO_SUM addresses as valid although this
8004 function says opposite. In most cases, LRA through different
8005 transformations can generate correct code for address reloads.
8006 It cannot manage only some LO_SUM cases. So we need to add
8007 code here saying that some addresses are still valid. */
8008 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8009 && small_toc_ref (x, VOIDmode));
8010 if (TARGET_TOC && ! large_toc_ok)
8011 return false;
8012 if (GET_MODE_NUNITS (mode) != 1)
8013 return false;
8014 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8015 && !(/* ??? Assume floating point reg based on mode? */
8016 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8017 return false;
8019 return CONSTANT_P (x) || large_toc_ok;
8022 return false;
8026 /* Try machine-dependent ways of modifying an illegitimate address
8027 to be legitimate. If we find one, return the new, valid address.
8028 This is used from only one place: `memory_address' in explow.c.
8030 OLDX is the address as it was before break_out_memory_refs was
8031 called. In some cases it is useful to look at this to decide what
8032 needs to be done.
8034 It is always safe for this function to do nothing. It exists to
8035 recognize opportunities to optimize the output.
8037 On RS/6000, first check for the sum of a register with a constant
8038 integer that is out of range. If so, generate code to add the
8039 constant with the low-order 16 bits masked to the register and force
8040 this result into another register (this can be done with `cau').
8041 Then generate an address of REG+(CONST&0xffff), allowing for the
8042 possibility of bit 16 being a one.
8044 Then check for the sum of a register and something not constant, try to
8045 load the other things into a register and return the sum. */
8047 static rtx
8048 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8049 machine_mode mode)
8051 unsigned int extra;
8053 if (!reg_offset_addressing_ok_p (mode)
8054 || mode_supports_dq_form (mode))
8056 if (virtual_stack_registers_memory_p (x))
8057 return x;
8059 /* In theory we should not be seeing addresses of the form reg+0,
8060 but just in case it is generated, optimize it away. */
8061 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8062 return force_reg (Pmode, XEXP (x, 0));
8064 /* For TImode with load/store quad, restrict addresses to just a single
8065 pointer, so it works with both GPRs and VSX registers. */
8066 /* Make sure both operands are registers. */
8067 else if (GET_CODE (x) == PLUS
8068 && (mode != TImode || !TARGET_VSX))
8069 return gen_rtx_PLUS (Pmode,
8070 force_reg (Pmode, XEXP (x, 0)),
8071 force_reg (Pmode, XEXP (x, 1)));
8072 else
8073 return force_reg (Pmode, x);
8075 if (SYMBOL_REF_P (x))
8077 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8078 if (model != 0)
8079 return rs6000_legitimize_tls_address (x, model);
8082 extra = 0;
8083 switch (mode)
8085 case E_TFmode:
8086 case E_TDmode:
8087 case E_TImode:
8088 case E_PTImode:
8089 case E_IFmode:
8090 case E_KFmode:
8091 /* As in legitimate_offset_address_p we do not assume
8092 worst-case. The mode here is just a hint as to the registers
8093 used. A TImode is usually in gprs, but may actually be in
8094 fprs. Leave worst-case scenario for reload to handle via
8095 insn constraints. PTImode is only GPRs. */
8096 extra = 8;
8097 break;
8098 default:
8099 break;
8102 if (GET_CODE (x) == PLUS
8103 && REG_P (XEXP (x, 0))
8104 && CONST_INT_P (XEXP (x, 1))
8105 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8106 >= 0x10000 - extra))
8108 HOST_WIDE_INT high_int, low_int;
8109 rtx sum;
8110 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8111 if (low_int >= 0x8000 - extra)
8112 low_int = 0;
8113 high_int = INTVAL (XEXP (x, 1)) - low_int;
8114 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8115 GEN_INT (high_int)), 0);
8116 return plus_constant (Pmode, sum, low_int);
8118 else if (GET_CODE (x) == PLUS
8119 && REG_P (XEXP (x, 0))
8120 && !CONST_INT_P (XEXP (x, 1))
8121 && GET_MODE_NUNITS (mode) == 1
8122 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8123 || (/* ??? Assume floating point reg based on mode? */
8124 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8125 && !avoiding_indexed_address_p (mode))
8127 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8128 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8130 else if ((TARGET_ELF
8131 #if TARGET_MACHO
8132 || !MACHO_DYNAMIC_NO_PIC_P
8133 #endif
8135 && TARGET_32BIT
8136 && TARGET_NO_TOC_OR_PCREL
8137 && !flag_pic
8138 && !CONST_INT_P (x)
8139 && !CONST_WIDE_INT_P (x)
8140 && !CONST_DOUBLE_P (x)
8141 && CONSTANT_P (x)
8142 && GET_MODE_NUNITS (mode) == 1
8143 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8144 || (/* ??? Assume floating point reg based on mode? */
8145 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8147 rtx reg = gen_reg_rtx (Pmode);
8148 if (TARGET_ELF)
8149 emit_insn (gen_elf_high (reg, x));
8150 else
8151 emit_insn (gen_macho_high (reg, x));
8152 return gen_rtx_LO_SUM (Pmode, reg, x);
8154 else if (TARGET_TOC
8155 && SYMBOL_REF_P (x)
8156 && constant_pool_expr_p (x)
8157 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8158 return create_TOC_reference (x, NULL_RTX);
8159 else
8160 return x;
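/* Worked example of the constant split in the first arm above: for
   x == (plus (reg 9) (const_int 70000)), low_int is
   ((70000 & 0xffff) ^ 0x8000) - 0x8000 == 4464 and high_int is 65536,
   so an addis-style add of the high part is forced into a register and
   (plus sum 4464) is returned, each half now fitting a 16-bit field.  */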
8163 /* Debug version of rs6000_legitimize_address. */
8164 static rtx
8165 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8167 rtx ret;
8168 rtx_insn *insns;
8170 start_sequence ();
8171 ret = rs6000_legitimize_address (x, oldx, mode);
8172 insns = get_insns ();
8173 end_sequence ();
8175 if (ret != x)
8177 fprintf (stderr,
8178 "\nrs6000_legitimize_address: mode %s, old code %s, "
8179 "new code %s, modified\n",
8180 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8181 GET_RTX_NAME (GET_CODE (ret)));
8183 fprintf (stderr, "Original address:\n");
8184 debug_rtx (x);
8186 fprintf (stderr, "oldx:\n");
8187 debug_rtx (oldx);
8189 fprintf (stderr, "New address:\n");
8190 debug_rtx (ret);
8192 if (insns)
8194 fprintf (stderr, "Insns added:\n");
8195 debug_rtx_list (insns, 20);
8198 else
8200 fprintf (stderr,
8201 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8202 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8204 debug_rtx (x);
8207 if (insns)
8208 emit_insn (insns);
8210 return ret;
8213 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8214 We need to emit DTP-relative relocations. */
8216 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8217 static void
8218 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8220 switch (size)
8222 case 4:
8223 fputs ("\t.long\t", file);
8224 break;
8225 case 8:
8226 fputs (DOUBLE_INT_ASM_OP, file);
8227 break;
8228 default:
8229 gcc_unreachable ();
8231 output_addr_const (file, x);
8232 if (TARGET_ELF)
8233 fputs ("@dtprel+0x8000", file);
8234 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8236 switch (SYMBOL_REF_TLS_MODEL (x))
8238 case 0:
8239 break;
8240 case TLS_MODEL_LOCAL_EXEC:
8241 fputs ("@le", file);
8242 break;
8243 case TLS_MODEL_INITIAL_EXEC:
8244 fputs ("@ie", file);
8245 break;
8246 case TLS_MODEL_GLOBAL_DYNAMIC:
8247 case TLS_MODEL_LOCAL_DYNAMIC:
8248 fputs ("@m", file);
8249 break;
8250 default:
8251 gcc_unreachable ();
8256 /* Return true if X is a symbol that refers to real (rather than emulated)
8257 TLS. */
8259 static bool
8260 rs6000_real_tls_symbol_ref_p (rtx x)
8262 return (SYMBOL_REF_P (x)
8263 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8266 /* In the name of slightly smaller debug output, and to cater to
8267 general assembler lossage, recognize various UNSPEC sequences
8268 and turn them back into a direct symbol reference. */
8270 static rtx
8271 rs6000_delegitimize_address (rtx orig_x)
8273 rtx x, y, offset;
8275 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8276 orig_x = XVECEXP (orig_x, 0, 0);
8278 orig_x = delegitimize_mem_from_attrs (orig_x);
8280 x = orig_x;
8281 if (MEM_P (x))
8282 x = XEXP (x, 0);
8284 y = x;
8285 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8286 y = XEXP (y, 1);
8288 offset = NULL_RTX;
8289 if (GET_CODE (y) == PLUS
8290 && GET_MODE (y) == Pmode
8291 && CONST_INT_P (XEXP (y, 1)))
8293 offset = XEXP (y, 1);
8294 y = XEXP (y, 0);
8297 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8299 y = XVECEXP (y, 0, 0);
8301 #ifdef HAVE_AS_TLS
8302 /* Do not associate thread-local symbols with the original
8303 constant pool symbol. */
8304 if (TARGET_XCOFF
8305 && SYMBOL_REF_P (y)
8306 && CONSTANT_POOL_ADDRESS_P (y)
8307 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8308 return orig_x;
8309 #endif
8311 if (offset != NULL_RTX)
8312 y = gen_rtx_PLUS (Pmode, y, offset);
8313 if (!MEM_P (orig_x))
8314 return y;
8315 else
8316 return replace_equiv_address_nv (orig_x, y);
8319 if (TARGET_MACHO
8320 && GET_CODE (orig_x) == LO_SUM
8321 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8323 y = XEXP (XEXP (orig_x, 1), 0);
8324 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8325 return XVECEXP (y, 0, 0);
8328 return orig_x;
8331 /* Return true if X shouldn't be emitted into the debug info.
8332 The linker doesn't like .toc section references from
8333 .debug_* sections, so reject .toc section symbols. */
8335 static bool
8336 rs6000_const_not_ok_for_debug_p (rtx x)
8338 if (GET_CODE (x) == UNSPEC)
8339 return true;
8340 if (SYMBOL_REF_P (x)
8341 && CONSTANT_POOL_ADDRESS_P (x))
8343 rtx c = get_pool_constant (x);
8344 machine_mode cmode = get_pool_mode (x);
8345 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8346 return true;
8349 return false;
8352 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8354 static bool
8355 rs6000_legitimate_combined_insn (rtx_insn *insn)
8357 int icode = INSN_CODE (insn);
8359 /* Reject creating doloop insns. Combine should not be allowed
8360 to create these for a number of reasons:
8361 1) In a nested loop, if combine creates one of these in an
8362 outer loop and the register allocator happens to allocate ctr
8363 to the outer loop insn, then the inner loop can't use ctr.
8364 Inner loops ought to be more highly optimized.
8365 2) Combine often wants to create one of these from what was
8366 originally a three insn sequence, first combining the three
8367 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8368 allocated ctr, the splitter takes us back to the three insn
8369 sequence. It's better to stop combine at the two insn
8370 sequence.
8371 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8372 insns, the register allocator sometimes uses floating point
8373 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8374 jump insn and output reloads are not implemented for jumps,
8375 the ctrsi/ctrdi splitters need to handle all possible cases.
8376 That's a pain, and it gets to be seriously difficult when a
8377 splitter that runs after reload needs memory to transfer from
8378 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8379 for the difficult case. It's better to not create problems
8380 in the first place. */
8381 if (icode != CODE_FOR_nothing
8382 && (icode == CODE_FOR_bdz_si
8383 || icode == CODE_FOR_bdz_di
8384 || icode == CODE_FOR_bdnz_si
8385 || icode == CODE_FOR_bdnz_di
8386 || icode == CODE_FOR_bdztf_si
8387 || icode == CODE_FOR_bdztf_di
8388 || icode == CODE_FOR_bdnztf_si
8389 || icode == CODE_FOR_bdnztf_di))
8390 return false;
8392 return true;
8395 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8397 static GTY(()) rtx rs6000_tls_symbol;
8398 static rtx
8399 rs6000_tls_get_addr (void)
8401 if (!rs6000_tls_symbol)
8402 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8404 return rs6000_tls_symbol;
8407 /* Construct the SYMBOL_REF for TLS GOT references. */
8409 static GTY(()) rtx rs6000_got_symbol;
8411 rs6000_got_sym (void)
8413 if (!rs6000_got_symbol)
8415 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8416 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8417 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8420 return rs6000_got_symbol;
8423 /* AIX Thread-Local Address support. */
8425 static rtx
8426 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8428 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8429 const char *name;
8430 char *tlsname;
8432 name = XSTR (addr, 0);
8433 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8434 or the symbol will be placed in the TLS private data section. */
8435 if (name[strlen (name) - 1] != ']'
8436 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8437 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8439 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8440 strcpy (tlsname, name);
8441 strcat (tlsname,
8442 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8443 tlsaddr = copy_rtx (addr);
8444 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8446 else
8447 tlsaddr = addr;
8449 /* Place addr into TOC constant pool. */
8450 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8452 /* Output the TOC entry and create the MEM referencing the value. */
8453 if (constant_pool_expr_p (XEXP (sym, 0))
8454 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8456 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8457 mem = gen_const_mem (Pmode, tocref);
8458 set_mem_alias_set (mem, get_TOC_alias_set ());
8460 else
8461 return sym;
8463 /* Use global-dynamic for local-dynamic. */
8464 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8465 || model == TLS_MODEL_LOCAL_DYNAMIC)
8467 /* Create new TOC reference for @m symbol. */
8468 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8469 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8470 strcpy (tlsname, "*LCM");
8471 strcat (tlsname, name + 3);
8472 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8473 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8474 tocref = create_TOC_reference (modaddr, NULL_RTX);
8475 rtx modmem = gen_const_mem (Pmode, tocref);
8476 set_mem_alias_set (modmem, get_TOC_alias_set ());
8478 rtx modreg = gen_reg_rtx (Pmode);
8479 emit_insn (gen_rtx_SET (modreg, modmem));
8481 tmpreg = gen_reg_rtx (Pmode);
8482 emit_insn (gen_rtx_SET (tmpreg, mem));
8484 dest = gen_reg_rtx (Pmode);
8485 if (TARGET_32BIT)
8486 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8487 else
8488 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8489 return dest;
8491 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8492 else if (TARGET_32BIT)
8494 tlsreg = gen_reg_rtx (SImode);
8495 emit_insn (gen_tls_get_tpointer (tlsreg));
8497 else
8498 tlsreg = gen_rtx_REG (DImode, 13);
8500 /* Load the TOC value into temporary register. */
8501 tmpreg = gen_reg_rtx (Pmode);
8502 emit_insn (gen_rtx_SET (tmpreg, mem));
8503 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8504 gen_rtx_MINUS (Pmode, addr, tlsreg));
8506 /* Add TOC symbol value to TLS pointer. */
8507 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8509 return dest;
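/* Naming sketch for the AIX path above: a public TLS symbol "foo" is
   rewritten here to "foo[TL]" (or "[UL]" for bss initializers) before
   being dropped into the TOC, and for the dynamic models a companion
   module-handle symbol is synthesized by rewriting the pool symbol's
   prefix to "*LCM".  */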
8512 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8513 __tls_get_addr call. */
8515 void
8516 rs6000_output_tlsargs (rtx *operands)
8518 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8519 rtx op[3];
8521 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8522 op[0] = operands[0];
8523 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8524 op[1] = XVECEXP (operands[2], 0, 0);
8525 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8527 /* The GOT register. */
8528 op[2] = XVECEXP (operands[2], 0, 1);
8529 if (TARGET_CMODEL != CMODEL_SMALL)
8530 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8531 "addi %0,%0,%1@got@tlsgd@l", op);
8532 else
8533 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8535 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8537 if (TARGET_CMODEL != CMODEL_SMALL)
8538 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8539 "addi %0,%0,%&@got@tlsld@l", op);
8540 else
8541 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8543 else
8544 gcc_unreachable ();
8547 /* Passes the tls arg value for global dynamic and local dynamic
8548 emit_library_call_value in rs6000_legitimize_tls_address to
8549 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8550 marker relocs put on __tls_get_addr calls. */
8551 static rtx global_tlsarg;
8553 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8554 this (thread-local) address. */
8556 static rtx
8557 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8559 rtx dest, insn;
8561 if (TARGET_XCOFF)
8562 return rs6000_legitimize_tls_address_aix (addr, model);
8564 dest = gen_reg_rtx (Pmode);
8565 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8567 rtx tlsreg;
8569 if (TARGET_64BIT)
8571 tlsreg = gen_rtx_REG (Pmode, 13);
8572 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8574 else
8576 tlsreg = gen_rtx_REG (Pmode, 2);
8577 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8579 emit_insn (insn);
8581 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8583 rtx tlsreg, tmp;
8585 tmp = gen_reg_rtx (Pmode);
8586 if (TARGET_64BIT)
8588 tlsreg = gen_rtx_REG (Pmode, 13);
8589 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8591 else
8593 tlsreg = gen_rtx_REG (Pmode, 2);
8594 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8596 emit_insn (insn);
8597 if (TARGET_64BIT)
8598 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8599 else
8600 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8601 emit_insn (insn);
8603 else
8605 rtx got, tga, tmp1, tmp2;
8607 /* We currently use relocations like @got@tlsgd for tls, which
8608 means the linker will handle allocation of tls entries, placing
8609 them in the .got section. So use a pointer to the .got section,
8610 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8611 or to secondary GOT sections used by 32-bit -fPIC. */
8612 if (TARGET_64BIT)
8613 got = gen_rtx_REG (Pmode, 2);
8614 else
8616 if (flag_pic == 1)
8617 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8618 else
8620 rtx gsym = rs6000_got_sym ();
8621 got = gen_reg_rtx (Pmode);
8622 if (flag_pic == 0)
8623 rs6000_emit_move (got, gsym, Pmode);
8624 else
8626 rtx mem, lab;
8628 tmp1 = gen_reg_rtx (Pmode);
8629 tmp2 = gen_reg_rtx (Pmode);
8630 mem = gen_const_mem (Pmode, tmp1);
8631 lab = gen_label_rtx ();
8632 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8633 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8634 if (TARGET_LINK_STACK)
8635 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8636 emit_move_insn (tmp2, mem);
8637 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8638 set_unique_reg_note (last, REG_EQUAL, gsym);
8643 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8645 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8646 UNSPEC_TLSGD);
8647 tga = rs6000_tls_get_addr ();
8648 global_tlsarg = arg;
8649 if (TARGET_TLS_MARKERS)
8651 rtx argreg = gen_rtx_REG (Pmode, 3);
8652 emit_insn (gen_rtx_SET (argreg, arg));
8653 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8654 argreg, Pmode);
8656 else
8657 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8658 global_tlsarg = NULL_RTX;
8660 /* Make a note so that the result of this call can be CSEd. */
8661 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8662 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8663 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8665 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8667 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8668 tga = rs6000_tls_get_addr ();
8669 tmp1 = gen_reg_rtx (Pmode);
8670 global_tlsarg = arg;
8671 if (TARGET_TLS_MARKERS)
8673 rtx argreg = gen_rtx_REG (Pmode, 3);
8674 emit_insn (gen_rtx_SET (argreg, arg));
8675 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8676 argreg, Pmode);
8678 else
8679 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8680 global_tlsarg = NULL_RTX;
8682 /* Make a note so that the result of this call can be CSEd. */
8683 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8684 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8685 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8687 if (rs6000_tls_size == 16)
8689 if (TARGET_64BIT)
8690 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8691 else
8692 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8694 else if (rs6000_tls_size == 32)
8696 tmp2 = gen_reg_rtx (Pmode);
8697 if (TARGET_64BIT)
8698 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8699 else
8700 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8701 emit_insn (insn);
8702 if (TARGET_64BIT)
8703 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8704 else
8705 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8707 else
8709 tmp2 = gen_reg_rtx (Pmode);
8710 if (TARGET_64BIT)
8711 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8712 else
8713 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8714 emit_insn (insn);
8715 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8717 emit_insn (insn);
8719 else
8721 /* IE, or 64-bit offset LE. */
8722 tmp2 = gen_reg_rtx (Pmode);
8723 if (TARGET_64BIT)
8724 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8725 else
8726 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8727 emit_insn (insn);
8728 if (TARGET_64BIT)
8729 insn = gen_tls_tls_64 (dest, tmp2, addr);
8730 else
8731 insn = gen_tls_tls_32 (dest, tmp2, addr);
8732 emit_insn (insn);
8736 return dest;
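/* As a rough sketch (the exact code depends on ABI, cmodel and
   -mtls-markers), the global-dynamic path above ends up emitting a
   sequence along the lines of:

       addi 3,2,x@got@tlsgd
       bl __tls_get_addr(x@tlsgd)
       nop

   with the REG_EQUAL note letting later passes CSE the result.  */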
8739 /* Only create the global variable for the stack protect guard if we are using
8740 the global flavor of that guard. */
8741 static tree
8742 rs6000_init_stack_protect_guard (void)
8744 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8745 return default_stack_protect_guard ();
8747 return NULL_TREE;
8750 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8752 static bool
8753 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8755 if (GET_CODE (x) == HIGH
8756 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8757 return true;
8759 /* A TLS symbol in the TOC cannot contain a sum. */
8760 if (GET_CODE (x) == CONST
8761 && GET_CODE (XEXP (x, 0)) == PLUS
8762 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8763 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8764 return true;
8766 /* Do not place an ELF TLS symbol in the constant pool. */
8767 return TARGET_ELF && tls_referenced_p (x);
8770 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8771 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8772 can be addressed relative to the toc pointer. */
8774 static bool
8775 use_toc_relative_ref (rtx sym, machine_mode mode)
8777 return ((constant_pool_expr_p (sym)
8778 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8779 get_pool_mode (sym)))
8780 || (TARGET_CMODEL == CMODEL_MEDIUM
8781 && SYMBOL_REF_LOCAL_P (sym)
8782 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8785 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8786 that is a valid memory address for an instruction.
8787 The MODE argument is the machine mode for the MEM expression
8788 that wants to use this address.
8790 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
8791 refers to a constant pool entry of an address (or the sum of it
8792 plus a constant), a short (16-bit signed) constant plus a register,
8793 the sum of two registers, or a register indirect, possibly with an
8794 auto-increment.  For DFmode, DDmode and DImode with a constant plus
8795 register, we must ensure that both words are addressable, or on
8796 PowerPC64 that the offset is word aligned.
8798 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8799 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8800 because adjacent memory cells are accessed by adding word-sized offsets
8801 during assembly output. */
8802 static bool
8803 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8805 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8806 bool quad_offset_p = mode_supports_dq_form (mode);
8808 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8809 if (VECTOR_MEM_ALTIVEC_P (mode)
8810 && GET_CODE (x) == AND
8811 && CONST_INT_P (XEXP (x, 1))
8812 && INTVAL (XEXP (x, 1)) == -16)
8813 x = XEXP (x, 0);
8815 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8816 return 0;
8817 if (legitimate_indirect_address_p (x, reg_ok_strict))
8818 return 1;
8819 if (TARGET_UPDATE
8820 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8821 && mode_supports_pre_incdec_p (mode)
8822 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8823 return 1;
8824 /* Handle restricted vector d-form offsets in ISA 3.0. */
8825 if (quad_offset_p)
8827 if (quad_address_p (x, mode, reg_ok_strict))
8828 return 1;
8830 else if (virtual_stack_registers_memory_p (x))
8831 return 1;
8833 else if (reg_offset_p)
8835 if (legitimate_small_data_p (mode, x))
8836 return 1;
8837 if (legitimate_constant_pool_address_p (x, mode,
8838 reg_ok_strict || lra_in_progress))
8839 return 1;
8842 /* For TImode, if we have TImode in VSX registers, only allow register
8843 indirect addresses. This will allow the values to go in either GPRs
8844 or VSX registers without reloading. The vector types would tend to
8845 go into VSX registers, so we allow REG+REG, while TImode seems
8846 somewhat split, in that some uses are GPR based, and some VSX based. */
8847 /* FIXME: We could loosen this by changing the following to
8848 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8849 but currently we cannot allow REG+REG addressing for TImode. See
8850 PR72827 for complete details on how this ends up hoodwinking DSE. */
8851 if (mode == TImode && TARGET_VSX)
8852 return 0;
8853 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8854 if (! reg_ok_strict
8855 && reg_offset_p
8856 && GET_CODE (x) == PLUS
8857 && REG_P (XEXP (x, 0))
8858 && (XEXP (x, 0) == virtual_stack_vars_rtx
8859 || XEXP (x, 0) == arg_pointer_rtx)
8860 && CONST_INT_P (XEXP (x, 1)))
8861 return 1;
8862 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8863 return 1;
8864 if (!FLOAT128_2REG_P (mode)
8865 && (TARGET_HARD_FLOAT
8866 || TARGET_POWERPC64
8867 || (mode != DFmode && mode != DDmode))
8868 && (TARGET_POWERPC64 || mode != DImode)
8869 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8870 && mode != PTImode
8871 && !avoiding_indexed_address_p (mode)
8872 && legitimate_indexed_address_p (x, reg_ok_strict))
8873 return 1;
8874 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8875 && mode_supports_pre_modify_p (mode)
8876 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8877 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8878 reg_ok_strict, false)
8879 || (!avoiding_indexed_address_p (mode)
8880 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8881 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8882 return 1;
8883 if (reg_offset_p && !quad_offset_p
8884 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8885 return 1;
8886 return 0;
8889 /* Debug version of rs6000_legitimate_address_p. */
8890 static bool
8891 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8892 bool reg_ok_strict)
8894 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8895 fprintf (stderr,
8896 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8897 "strict = %d, reload = %s, code = %s\n",
8898 ret ? "true" : "false",
8899 GET_MODE_NAME (mode),
8900 reg_ok_strict,
8901 (reload_completed ? "after" : "before"),
8902 GET_RTX_NAME (GET_CODE (x)));
8903 debug_rtx (x);
8905 return ret;
8908 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8910 static bool
8911 rs6000_mode_dependent_address_p (const_rtx addr,
8912 addr_space_t as ATTRIBUTE_UNUSED)
8914 return rs6000_mode_dependent_address_ptr (addr);
8917 /* Go to LABEL if ADDR (a legitimate address expression)
8918 has an effect that depends on the machine mode it is used for.
8920 On the RS/6000 this is true of all integral offsets (since AltiVec
8921 and VSX modes don't allow them) and of pre-increment or decrement.
8923 ??? Except that due to conceptual problems in offsettable_address_p
8924 we can't really report the problems of integral offsets. So leave
8925 this assuming that the adjustable offset must be valid for the
8926 sub-words of a TFmode operand, which is what we had before. */
8928 static bool
8929 rs6000_mode_dependent_address (const_rtx addr)
8931 switch (GET_CODE (addr))
8933 case PLUS:
8934 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8935 is considered a legitimate address before reload, so there
8936 are no offset restrictions in that case. Note that this
8937 condition is safe in strict mode because any address involving
8938 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8939 been rejected as illegitimate. */
8940 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8941 && XEXP (addr, 0) != arg_pointer_rtx
8942 && CONST_INT_P (XEXP (addr, 1)))
8944 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
8945 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
8947 break;
8949 case LO_SUM:
8950 /* Anything in the constant pool is sufficiently aligned that
8951 all bytes have the same high part address. */
8952 return !legitimate_constant_pool_address_p (addr, QImode, false);
8954 /* Auto-increment cases are now treated generically in recog.c. */
8955 case PRE_MODIFY:
8956 return TARGET_UPDATE;
8958 /* AND is only allowed in Altivec loads. */
8959 case AND:
8960 return true;
8962 default:
8963 break;
8966 return false;
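/* Example: (plus (reg 31) (const_int 32760)) is mode-dependent on
   32-bit (32760 + 0x8000 == 65528 >= 0x10000 - 12), since reaching the
   upper words of a TFmode value would overflow the displacement, while
   an offset of 16384 is safe for any mode.  */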
8969 /* Debug version of rs6000_mode_dependent_address. */
8970 static bool
8971 rs6000_debug_mode_dependent_address (const_rtx addr)
8973 bool ret = rs6000_mode_dependent_address (addr);
8975 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
8976 ret ? "true" : "false");
8977 debug_rtx (addr);
8979 return ret;
8982 /* Implement FIND_BASE_TERM. */
8985 rs6000_find_base_term (rtx op)
8987 rtx base;
8989 base = op;
8990 if (GET_CODE (base) == CONST)
8991 base = XEXP (base, 0);
8992 if (GET_CODE (base) == PLUS)
8993 base = XEXP (base, 0);
8994 if (GET_CODE (base) == UNSPEC)
8995 switch (XINT (base, 1))
8997 case UNSPEC_TOCREL:
8998 case UNSPEC_MACHOPIC_OFFSET:
8999 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9000 for aliasing purposes. */
9001 return XVECEXP (base, 0, 0);
9004 return op;
9007 /* More elaborate version of recog's offsettable_memref_p predicate
9008 that works around the ??? note of rs6000_mode_dependent_address.
9009 In particular it accepts
9011 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9013 in 32-bit mode, that the recog predicate rejects. */
9015 static bool
9016 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9018 bool worst_case;
9020 if (!MEM_P (op))
9021 return false;
9023 /* First mimic offsettable_memref_p. */
9024 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9025 return true;
9027 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9028 the latter predicate knows nothing about the mode of the memory
9029 reference and, therefore, assumes that it is the largest supported
9030 mode (TFmode). As a consequence, legitimate offsettable memory
9031 references are rejected. rs6000_legitimate_offset_address_p contains
9032 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9033 at least with a little bit of help here given that we know the
9034 actual registers used. */
9035 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9036 || GET_MODE_SIZE (reg_mode) == 4);
9037 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9038 strict, worst_case);
9041 /* Determine the reassociation width to be used in reassociate_bb.
9042 This takes into account how many parallel operations we
9043 can actually do of a given type, and also the latency.
9045 int add/sub 6/cycle
9046 mul 2/cycle
9047 vect add/sub/mul 2/cycle
9048 fp add/sub/mul 2/cycle
9049 dfp 1/cycle
9052 static int
9053 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9054 machine_mode mode)
9056 switch (rs6000_tune)
9058 case PROCESSOR_POWER8:
9059 case PROCESSOR_POWER9:
9060 case PROCESSOR_FUTURE:
9061 if (DECIMAL_FLOAT_MODE_P (mode))
9062 return 1;
9063 if (VECTOR_MODE_P (mode))
9064 return 4;
9065 if (INTEGRAL_MODE_P (mode))
9066 return 1;
9067 if (FLOAT_MODE_P (mode))
9068 return 4;
9069 break;
9070 default:
9071 break;
9073 return 1;
9076 /* Change register usage conditional on target flags. */
9077 static void
9078 rs6000_conditional_register_usage (void)
9080 int i;
9082 if (TARGET_DEBUG_TARGET)
9083 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9085 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9086 if (TARGET_64BIT)
9087 fixed_regs[13] = call_used_regs[13]
9088 = call_really_used_regs[13] = 1;
9090 /* Conditionally disable FPRs. */
9091 if (TARGET_SOFT_FLOAT)
9092 for (i = 32; i < 64; i++)
9093 fixed_regs[i] = call_used_regs[i]
9094 = call_really_used_regs[i] = 1;
9096 /* The TOC register is not killed across calls in a way that is
9097 visible to the compiler. */
9098 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9099 call_really_used_regs[2] = 0;
9101 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9102 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9104 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9105 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9106 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9107 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9109 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9110 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9111 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9112 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9114 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9115 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9116 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9118 if (!TARGET_ALTIVEC && !TARGET_VSX)
9120 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9121 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9122 call_really_used_regs[VRSAVE_REGNO] = 1;
9125 if (TARGET_ALTIVEC || TARGET_VSX)
9126 global_regs[VSCR_REGNO] = 1;
9128 if (TARGET_ALTIVEC_ABI)
9130 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9131 call_used_regs[i] = call_really_used_regs[i] = 1;
9133 /* AIX reserves VR20:31 in non-extended ABI mode. */
9134 if (TARGET_XCOFF)
9135 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9136 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9141 /* Output insns to set DEST equal to the constant SOURCE as a series of
9142 lis, ori and shl instructions and return TRUE. */
9144 bool
9145 rs6000_emit_set_const (rtx dest, rtx source)
9147 machine_mode mode = GET_MODE (dest);
9148 rtx temp, set;
9149 rtx_insn *insn;
9150 HOST_WIDE_INT c;
9152 gcc_checking_assert (CONST_INT_P (source));
9153 c = INTVAL (source);
9154 switch (mode)
9156 case E_QImode:
9157 case E_HImode:
9158 emit_insn (gen_rtx_SET (dest, source));
9159 return true;
9161 case E_SImode:
9162 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9164 emit_insn (gen_rtx_SET (copy_rtx (temp),
9165 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9166 emit_insn (gen_rtx_SET (dest,
9167 gen_rtx_IOR (SImode, copy_rtx (temp),
9168 GEN_INT (c & 0xffff))));
9169 break;
9171 case E_DImode:
9172 if (!TARGET_POWERPC64)
9174 rtx hi, lo;
9176 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9177 DImode);
9178 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9179 DImode);
9180 emit_move_insn (hi, GEN_INT (c >> 32));
9181 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9182 emit_move_insn (lo, GEN_INT (c));
9184 else
9185 rs6000_emit_set_long_const (dest, c);
9186 break;
9188 default:
9189 gcc_unreachable ();
9192 insn = get_last_insn ();
9193 set = single_set (insn);
9194 if (! CONSTANT_P (SET_SRC (set)))
9195 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9197 return true;
9200 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9201 Output insns to set DEST equal to the constant C as a series of
9202 lis, ori and shl instructions. */
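/* An illustrative sketch of the decomposition below: the constant is split
   into four 16-bit chunks ud4:ud3:ud2:ud1, most to least significant.
   For example, c = 0x123456789abcdef0 gives ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc and ud1 = 0xdef0, and the fully general case emits
       lis rT, 0x1234; ori rT, rT, 0x5678; sldi rT, rT, 32;
       oris rT, rT, 0x9abc; ori rD, rT, 0xdef0
   while the earlier special cases handle constants needing fewer insns.  */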
9204 static void
9205 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9207 rtx temp;
9208 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9210 ud1 = c & 0xffff;
9211 c = c >> 16;
9212 ud2 = c & 0xffff;
9213 c = c >> 16;
9214 ud3 = c & 0xffff;
9215 c = c >> 16;
9216 ud4 = c & 0xffff;

9218 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9219 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9220 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9222 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9223 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9225 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9227 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9228 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9229 if (ud1 != 0)
9230 emit_move_insn (dest,
9231 gen_rtx_IOR (DImode, copy_rtx (temp),
9232 GEN_INT (ud1)));
9234 else if (ud3 == 0 && ud4 == 0)
9236 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9238 gcc_assert (ud2 & 0x8000);
9239 emit_move_insn (copy_rtx (temp),
9240 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9241 if (ud1 != 0)
9242 emit_move_insn (copy_rtx (temp),
9243 gen_rtx_IOR (DImode, copy_rtx (temp),
9244 GEN_INT (ud1)));
9245 emit_move_insn (dest,
9246 gen_rtx_ZERO_EXTEND (DImode,
9247 gen_lowpart (SImode,
9248 copy_rtx (temp))));
9250 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9251 || (ud4 == 0 && ! (ud3 & 0x8000)))
9253 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9255 emit_move_insn (copy_rtx (temp),
9256 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9257 if (ud2 != 0)
9258 emit_move_insn (copy_rtx (temp),
9259 gen_rtx_IOR (DImode, copy_rtx (temp),
9260 GEN_INT (ud2)));
9261 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9262 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9263 GEN_INT (16)));
9264 if (ud1 != 0)
9265 emit_move_insn (dest,
9266 gen_rtx_IOR (DImode, copy_rtx (temp),
9267 GEN_INT (ud1)));
9269 else
9271 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9273 emit_move_insn (copy_rtx (temp),
9274 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9275 if (ud3 != 0)
9276 emit_move_insn (copy_rtx (temp),
9277 gen_rtx_IOR (DImode, copy_rtx (temp),
9278 GEN_INT (ud3)));
9280 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9281 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9282 GEN_INT (32)));
9283 if (ud2 != 0)
9284 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9285 gen_rtx_IOR (DImode, copy_rtx (temp),
9286 GEN_INT (ud2 << 16)));
9287 if (ud1 != 0)
9288 emit_move_insn (dest,
9289 gen_rtx_IOR (DImode, copy_rtx (temp),
9290 GEN_INT (ud1)));
9294 /* Helper for the following. Get rid of [r+r] memory refs
9295 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9297 static void
9298 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9300 if (MEM_P (operands[0])
9301 && !REG_P (XEXP (operands[0], 0))
9302 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9303 GET_MODE (operands[0]), false))
9304 operands[0]
9305 = replace_equiv_address (operands[0],
9306 copy_addr_to_reg (XEXP (operands[0], 0)));
9308 if (MEM_P (operands[1])
9309 && !REG_P (XEXP (operands[1], 0))
9310 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9311 GET_MODE (operands[1]), false))
9312 operands[1]
9313 = replace_equiv_address (operands[1],
9314 copy_addr_to_reg (XEXP (operands[1], 0)));
9317 /* Generate a vector of constants to permute MODE for a little-endian
9318 storage operation by swapping the two halves of a vector. */
9319 static rtvec
9320 rs6000_const_vec (machine_mode mode)
9322 int i, subparts;
9323 rtvec v;
9325 switch (mode)
9327 case E_V1TImode:
9328 subparts = 1;
9329 break;
9330 case E_V2DFmode:
9331 case E_V2DImode:
9332 subparts = 2;
9333 break;
9334 case E_V4SFmode:
9335 case E_V4SImode:
9336 subparts = 4;
9337 break;
9338 case E_V8HImode:
9339 subparts = 8;
9340 break;
9341 case E_V16QImode:
9342 subparts = 16;
9343 break;
9344 default:
9345 gcc_unreachable ();
9348 v = rtvec_alloc (subparts);
9350 for (i = 0; i < subparts / 2; ++i)
9351 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9352 for (i = subparts / 2; i < subparts; ++i)
9353 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9355 return v;
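/* For example (illustrative): for V4SImode this builds the selector
   { 2, 3, 0, 1 }, and for V2DImode it builds { 1, 0 } -- in each case the
   two doubleword halves of the vector are exchanged.  */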
9358 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9359 store operation. */
9360 void
9361 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9363 /* Scalar permutations are easier to express in integer modes than in
9364 floating-point modes, so cast them here. We use V1TImode instead
9365 of TImode to ensure that the values don't go through GPRs. */
9366 if (FLOAT128_VECTOR_P (mode))
9368 dest = gen_lowpart (V1TImode, dest);
9369 source = gen_lowpart (V1TImode, source);
9370 mode = V1TImode;
9373 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9374 scalar. */
9375 if (mode == TImode || mode == V1TImode)
9376 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9377 GEN_INT (64))));
9378 else
9380 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9381 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9385 /* Emit a little-endian load from vector memory location SOURCE to VSX
9386 register DEST in mode MODE. The load is done with two permuting
9387 insns that represent an lxvd2x and an xxpermdi. */
9388 void
9389 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9391 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9392 V1TImode). */
9393 if (mode == TImode || mode == V1TImode)
9395 mode = V2DImode;
9396 dest = gen_lowpart (V2DImode, dest);
9397 source = adjust_address (source, V2DImode, 0);
9400 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9401 rs6000_emit_le_vsx_permute (tmp, source, mode);
9402 rs6000_emit_le_vsx_permute (dest, tmp, mode);
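/* The double permute above is intentional (a sketch of the intent, not a
   normative statement): on little-endian targets an lxvd2x loads the two
   doublewords swapped, and the following xxpermdi swaps them back.  When
   such loads feed other permuting accesses, later optimization can cancel
   redundant permute pairs; see rs6000_emit_le_vsx_move below.  */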
9405 /* Emit a little-endian store to vector memory location DEST from VSX
9406 register SOURCE in mode MODE. The store is done with two permuting
9407 insns that represent an xxpermdi and an stxvd2x. */
9408 void
9409 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9411 /* This should never be called during or after LRA, because it does
9412 not re-permute the source register. It is intended only for use
9413 during expand. */
9414 gcc_assert (!lra_in_progress && !reload_completed);
9416 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9417 V1TImode). */
9418 if (mode == TImode || mode == V1TImode)
9420 mode = V2DImode;
9421 dest = adjust_address (dest, V2DImode, 0);
9422 source = gen_lowpart (V2DImode, source);
9425 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9426 rs6000_emit_le_vsx_permute (tmp, source, mode);
9427 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9430 /* Emit a sequence representing a little-endian VSX load or store,
9431 moving data from SOURCE to DEST in mode MODE. This is done
9432 separately from rs6000_emit_move to ensure it is called only
9433 during expand. LE VSX loads and stores introduced later are
9434 handled with a split. The expand-time RTL generation allows
9435 us to optimize away redundant pairs of register-permutes. */
9436 void
9437 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9439 gcc_assert (!BYTES_BIG_ENDIAN
9440 && VECTOR_MEM_VSX_P (mode)
9441 && !TARGET_P9_VECTOR
9442 && !gpr_or_gpr_p (dest, source)
9443 && (MEM_P (source) ^ MEM_P (dest)));
9445 if (MEM_P (source))
9447 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9448 rs6000_emit_le_vsx_load (dest, source, mode);
9450 else
9452 if (!REG_P (source))
9453 source = force_reg (mode, source);
9454 rs6000_emit_le_vsx_store (dest, source, mode);
9458 /* Return whether an SFmode or SImode move can be done without converting one
9459 mode to another. This arises when we have:
9461 (SUBREG:SF (REG:SI ...))
9462 (SUBREG:SI (REG:SF ...))
9464 and one of the values is in a floating point/vector register, where SFmode
9465 scalars are stored in DFmode format. */
9467 bool
9468 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9470 if (TARGET_ALLOW_SF_SUBREG)
9471 return true;
9473 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9474 return true;
9476 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9477 return true;
9479 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9480 if (SUBREG_P (dest))
9482 rtx dest_subreg = SUBREG_REG (dest);
9483 rtx src_subreg = SUBREG_REG (src);
9484 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9487 return false;
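/* Examples (an illustrative sketch): with TARGET_ALLOW_SF_SUBREG any such
   move is allowed.  Otherwise
       (set (subreg:SI (reg:SF)) (subreg:SI (reg:SF)))
   is still valid, since both sides use the same representation, while
       (set (reg:SI) (subreg:SI (reg:SF)))
   is rejected, because SFmode scalars live in DFmode format in the vector
   registers and must be converted first; see
   rs6000_emit_move_si_sf_subreg below.  */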
9491 /* Helper function to change moves with:
9493 (SUBREG:SF (REG:SI)) and
9494 (SUBREG:SI (REG:SF))
9496 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9497 values are stored as DFmode values in the VSX registers. We need to convert
9498 the bits before we can use a direct move or operate on the bits in the
9499 vector register as an integer type.
9501 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9503 static bool
9504 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9506 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9507 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9508 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9510 rtx inner_source = SUBREG_REG (source);
9511 machine_mode inner_mode = GET_MODE (inner_source);
9513 if (mode == SImode && inner_mode == SFmode)
9515 emit_insn (gen_movsi_from_sf (dest, inner_source));
9516 return true;
9519 if (mode == SFmode && inner_mode == SImode)
9521 emit_insn (gen_movsf_from_si (dest, inner_source));
9522 return true;
9526 return false;
9529 /* Emit a move from SOURCE to DEST in mode MODE. */
9530 void
9531 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9533 rtx operands[2];
9534 operands[0] = dest;
9535 operands[1] = source;
9537 if (TARGET_DEBUG_ADDR)
9539 fprintf (stderr,
9540 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9541 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9542 GET_MODE_NAME (mode),
9543 lra_in_progress,
9544 reload_completed,
9545 can_create_pseudo_p ());
9546 debug_rtx (dest);
9547 fprintf (stderr, "source:\n");
9548 debug_rtx (source);
9551 /* Check that we get CONST_WIDE_INT only when we should. */
9552 if (CONST_WIDE_INT_P (operands[1])
9553 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9554 gcc_unreachable ();
9556 #ifdef HAVE_AS_GNU_ATTRIBUTE
9557 /* If we use a long double type, set the flags in .gnu_attribute that say
9558 what the long double type is. This is to allow the linker's warning
9559 message for the wrong long double to be useful, even if the function does
9560 not do a call (for example, doing a 128-bit add on power9 if the long
9561 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9562 are used when they aren't the default long double type. */
9563 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9565 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9566 rs6000_passes_float = rs6000_passes_long_double = true;
9568 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9569 rs6000_passes_float = rs6000_passes_long_double = true;
9571 #endif
9573 /* See if we need to special case SImode/SFmode SUBREG moves. */
9574 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9575 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9576 return;
9578 /* Check if GCC is setting up a block move that will end up using FP
9579 registers as temporaries. We must make sure this is acceptable. */
9580 if (MEM_P (operands[0])
9581 && MEM_P (operands[1])
9582 && mode == DImode
9583 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9584 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9585 && ! (rs6000_slow_unaligned_access (SImode,
9586 (MEM_ALIGN (operands[0]) > 32
9587 ? 32 : MEM_ALIGN (operands[0])))
9588 || rs6000_slow_unaligned_access (SImode,
9589 (MEM_ALIGN (operands[1]) > 32
9590 ? 32 : MEM_ALIGN (operands[1]))))
9591 && ! MEM_VOLATILE_P (operands [0])
9592 && ! MEM_VOLATILE_P (operands [1]))
9594 emit_move_insn (adjust_address (operands[0], SImode, 0),
9595 adjust_address (operands[1], SImode, 0));
9596 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9597 adjust_address (copy_rtx (operands[1]), SImode, 4));
9598 return;
9601 if (can_create_pseudo_p () && MEM_P (operands[0])
9602 && !gpc_reg_operand (operands[1], mode))
9603 operands[1] = force_reg (mode, operands[1]);
9605 /* Recognize the case where operand[1] is a reference to thread-local
9606 data and load its address to a register. */
9607 if (tls_referenced_p (operands[1]))
9609 enum tls_model model;
9610 rtx tmp = operands[1];
9611 rtx addend = NULL;
9613 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9615 addend = XEXP (XEXP (tmp, 0), 1);
9616 tmp = XEXP (XEXP (tmp, 0), 0);
9619 gcc_assert (SYMBOL_REF_P (tmp));
9620 model = SYMBOL_REF_TLS_MODEL (tmp);
9621 gcc_assert (model != 0);
9623 tmp = rs6000_legitimize_tls_address (tmp, model);
9624 if (addend)
9626 tmp = gen_rtx_PLUS (mode, tmp, addend);
9627 tmp = force_operand (tmp, operands[0]);
9629 operands[1] = tmp;
9632 /* 128-bit constant floating-point values on Darwin should really be loaded
9633 as two parts. However, this premature splitting is a problem when DFmode
9634 values can go into Altivec registers. */
9635 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9636 && !reg_addr[DFmode].scalar_in_vmx_p)
9638 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9639 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9640 DFmode);
9641 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9642 GET_MODE_SIZE (DFmode)),
9643 simplify_gen_subreg (DFmode, operands[1], mode,
9644 GET_MODE_SIZE (DFmode)),
9645 DFmode);
9646 return;
9649 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9650 p1:SD) if p1 is not of floating point class and p0 is spilled as
9651 we can have no analogous movsd_store for this. */
9652 if (lra_in_progress && mode == DDmode
9653 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9654 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9655 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9656 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9658 enum reg_class cl;
9659 int regno = REGNO (SUBREG_REG (operands[1]));
9661 if (!HARD_REGISTER_NUM_P (regno))
9663 cl = reg_preferred_class (regno);
9664 regno = reg_renumber[regno];
9665 if (regno < 0)
9666 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9668 if (regno >= 0 && ! FP_REGNO_P (regno))
9670 mode = SDmode;
9671 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9672 operands[1] = SUBREG_REG (operands[1]);
9675 if (lra_in_progress
9676 && mode == SDmode
9677 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9678 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9679 && (REG_P (operands[1])
9680 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9682 int regno = reg_or_subregno (operands[1]);
9683 enum reg_class cl;
9685 if (!HARD_REGISTER_NUM_P (regno))
9687 cl = reg_preferred_class (regno);
9688 gcc_assert (cl != NO_REGS);
9689 regno = reg_renumber[regno];
9690 if (regno < 0)
9691 regno = ira_class_hard_regs[cl][0];
9693 if (FP_REGNO_P (regno))
9695 if (GET_MODE (operands[0]) != DDmode)
9696 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9697 emit_insn (gen_movsd_store (operands[0], operands[1]));
9699 else if (INT_REGNO_P (regno))
9700 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9701 else
9702 gcc_unreachable ();
9703 return;
9705 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9706 p1:DD)) if p0 is not of floating point class and p1 is spilled as
9707 we can have no analogous movsd_load for this. */
9708 if (lra_in_progress && mode == DDmode
9709 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9710 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9711 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9712 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9714 enum reg_class cl;
9715 int regno = REGNO (SUBREG_REG (operands[0]));
9717 if (!HARD_REGISTER_NUM_P (regno))
9719 cl = reg_preferred_class (regno);
9720 regno = reg_renumber[regno];
9721 if (regno < 0)
9722 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9724 if (regno >= 0 && ! FP_REGNO_P (regno))
9726 mode = SDmode;
9727 operands[0] = SUBREG_REG (operands[0]);
9728 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9731 if (lra_in_progress
9732 && mode == SDmode
9733 && (REG_P (operands[0])
9734 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9735 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9736 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9738 int regno = reg_or_subregno (operands[0]);
9739 enum reg_class cl;
9741 if (!HARD_REGISTER_NUM_P (regno))
9743 cl = reg_preferred_class (regno);
9744 gcc_assert (cl != NO_REGS);
9745 regno = reg_renumber[regno];
9746 if (regno < 0)
9747 regno = ira_class_hard_regs[cl][0];
9749 if (FP_REGNO_P (regno))
9751 if (GET_MODE (operands[1]) != DDmode)
9752 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9753 emit_insn (gen_movsd_load (operands[0], operands[1]));
9755 else if (INT_REGNO_P (regno))
9756 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9757 else
9758 gcc_unreachable ();
9759 return;
9762 /* FIXME: In the long term, this switch statement should go away
9763 and be replaced by a sequence of tests based on things like
9764 mode == Pmode. */
9765 switch (mode)
9767 case E_HImode:
9768 case E_QImode:
9769 if (CONSTANT_P (operands[1])
9770 && !CONST_INT_P (operands[1]))
9771 operands[1] = force_const_mem (mode, operands[1]);
9772 break;
9774 case E_TFmode:
9775 case E_TDmode:
9776 case E_IFmode:
9777 case E_KFmode:
9778 if (FLOAT128_2REG_P (mode))
9779 rs6000_eliminate_indexed_memrefs (operands);
9780 /* fall through */
9782 case E_DFmode:
9783 case E_DDmode:
9784 case E_SFmode:
9785 case E_SDmode:
9786 if (CONSTANT_P (operands[1])
9787 && ! easy_fp_constant (operands[1], mode))
9788 operands[1] = force_const_mem (mode, operands[1]);
9789 break;
9791 case E_V16QImode:
9792 case E_V8HImode:
9793 case E_V4SFmode:
9794 case E_V4SImode:
9795 case E_V2DFmode:
9796 case E_V2DImode:
9797 case E_V1TImode:
9798 if (CONSTANT_P (operands[1])
9799 && !easy_vector_constant (operands[1], mode))
9800 operands[1] = force_const_mem (mode, operands[1]);
9801 break;
9803 case E_SImode:
9804 case E_DImode:
9805 /* Use the default pattern for the address of ELF small data. */
9806 if (TARGET_ELF
9807 && mode == Pmode
9808 && DEFAULT_ABI == ABI_V4
9809 && (SYMBOL_REF_P (operands[1])
9810 || GET_CODE (operands[1]) == CONST)
9811 && small_data_operand (operands[1], mode))
9813 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9814 return;
9817 if (DEFAULT_ABI == ABI_V4
9818 && mode == Pmode && mode == SImode
9819 && flag_pic == 1 && got_operand (operands[1], mode))
9821 emit_insn (gen_movsi_got (operands[0], operands[1]));
9822 return;
9825 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9826 && TARGET_NO_TOC_OR_PCREL
9827 && ! flag_pic
9828 && mode == Pmode
9829 && CONSTANT_P (operands[1])
9830 && GET_CODE (operands[1]) != HIGH
9831 && !CONST_INT_P (operands[1]))
9833 rtx target = (!can_create_pseudo_p ()
9834 ? operands[0]
9835 : gen_reg_rtx (mode));
9837 /* If this is a function address on -mcall-aixdesc,
9838 convert it to the address of the descriptor. */
9839 if (DEFAULT_ABI == ABI_AIX
9840 && SYMBOL_REF_P (operands[1])
9841 && XSTR (operands[1], 0)[0] == '.')
9843 const char *name = XSTR (operands[1], 0);
9844 rtx new_ref;
9845 while (*name == '.')
9846 name++;
9847 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9848 CONSTANT_POOL_ADDRESS_P (new_ref)
9849 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9850 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9851 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9852 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9853 operands[1] = new_ref;
9856 if (DEFAULT_ABI == ABI_DARWIN)
9858 #if TARGET_MACHO
9859 if (MACHO_DYNAMIC_NO_PIC_P)
9861 /* Take care of any required data indirection. */
9862 operands[1] = rs6000_machopic_legitimize_pic_address (
9863 operands[1], mode, operands[0]);
9864 if (operands[0] != operands[1])
9865 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9866 return;
9868 #endif
9869 emit_insn (gen_macho_high (target, operands[1]));
9870 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9871 return;
9874 emit_insn (gen_elf_high (target, operands[1]));
9875 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9876 return;
9879 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9880 and we have put it in the TOC, we just need to make a TOC-relative
9881 reference to it. */
9882 if (TARGET_TOC
9883 && SYMBOL_REF_P (operands[1])
9884 && use_toc_relative_ref (operands[1], mode))
9885 operands[1] = create_TOC_reference (operands[1], operands[0]);
9886 else if (mode == Pmode
9887 && CONSTANT_P (operands[1])
9888 && GET_CODE (operands[1]) != HIGH
9889 && ((REG_P (operands[0])
9890 && FP_REGNO_P (REGNO (operands[0])))
9891 || !CONST_INT_P (operands[1])
9892 || (num_insns_constant (operands[1], mode)
9893 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9894 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
9895 && (TARGET_CMODEL == CMODEL_SMALL
9896 || can_create_pseudo_p ()
9897 || (REG_P (operands[0])
9898 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9901 #if TARGET_MACHO
9902 /* Darwin uses a special PIC legitimizer. */
9903 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9905 operands[1] =
9906 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9907 operands[0]);
9908 if (operands[0] != operands[1])
9909 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9910 return;
9912 #endif
9914 /* If we are to limit the number of things we put in the TOC and
9915 this is a symbol plus a constant we can add in one insn,
9916 just put the symbol in the TOC and add the constant. */
9917 if (GET_CODE (operands[1]) == CONST
9918 && TARGET_NO_SUM_IN_TOC
9919 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9920 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9921 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9922 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
9923 && ! side_effects_p (operands[0]))
9925 rtx sym =
9926 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9927 rtx other = XEXP (XEXP (operands[1], 0), 1);
9929 sym = force_reg (mode, sym);
9930 emit_insn (gen_add3_insn (operands[0], sym, other));
9931 return;
9934 operands[1] = force_const_mem (mode, operands[1]);
9936 if (TARGET_TOC
9937 && SYMBOL_REF_P (XEXP (operands[1], 0))
9938 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
9940 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9941 operands[0]);
9942 operands[1] = gen_const_mem (mode, tocref);
9943 set_mem_alias_set (operands[1], get_TOC_alias_set ());
9946 break;
9948 case E_TImode:
9949 if (!VECTOR_MEM_VSX_P (TImode))
9950 rs6000_eliminate_indexed_memrefs (operands);
9951 break;
9953 case E_PTImode:
9954 rs6000_eliminate_indexed_memrefs (operands);
9955 break;
9957 default:
9958 fatal_insn ("bad move", gen_rtx_SET (dest, source));
9961 /* Above, we may have called force_const_mem which may have returned
9962 an invalid address. If we can, fix this up; otherwise, reload will
9963 have to deal with it. */
9964 if (MEM_P (operands[1]))
9965 operands[1] = validize_mem (operands[1]);
9967 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9970 /* Nonzero if we can use a floating-point register to pass this arg. */
9971 #define USE_FP_FOR_ARG_P(CUM,MODE) \
9972 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
9973 && (CUM)->fregno <= FP_ARG_MAX_REG \
9974 && TARGET_HARD_FLOAT)
9976 /* Nonzero if we can use an AltiVec register to pass this arg. */
9977 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
9978 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
9979 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
9980 && TARGET_ALTIVEC_ABI \
9981 && (NAMED))
9983 /* Walk down the type tree of TYPE counting consecutive base elements.
9984 If *MODEP is VOIDmode, then set it to the first valid floating point
9985 or vector type. If a non-floating point or vector type is found, or
9986 if a floating point or vector type that doesn't match a non-VOIDmode
9987 *MODEP is found, then return -1, otherwise return the count in the
9988 sub-tree. */
9990 static int
9991 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
9993 machine_mode mode;
9994 HOST_WIDE_INT size;
9996 switch (TREE_CODE (type))
9998 case REAL_TYPE:
9999 mode = TYPE_MODE (type);
10000 if (!SCALAR_FLOAT_MODE_P (mode))
10001 return -1;
10003 if (*modep == VOIDmode)
10004 *modep = mode;
10006 if (*modep == mode)
10007 return 1;
10009 break;
10011 case COMPLEX_TYPE:
10012 mode = TYPE_MODE (TREE_TYPE (type));
10013 if (!SCALAR_FLOAT_MODE_P (mode))
10014 return -1;
10016 if (*modep == VOIDmode)
10017 *modep = mode;
10019 if (*modep == mode)
10020 return 2;
10022 break;
10024 case VECTOR_TYPE:
10025 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10026 return -1;
10028 /* Use V4SImode as representative of all 128-bit vector types. */
10029 size = int_size_in_bytes (type);
10030 switch (size)
10032 case 16:
10033 mode = V4SImode;
10034 break;
10035 default:
10036 return -1;
10039 if (*modep == VOIDmode)
10040 *modep = mode;
10042 /* Vector modes are considered to be opaque: two vectors are
10043 equivalent for the purposes of being homogeneous aggregates
10044 if they are the same size. */
10045 if (*modep == mode)
10046 return 1;
10048 break;
10050 case ARRAY_TYPE:
10052 int count;
10053 tree index = TYPE_DOMAIN (type);
10055 /* Can't handle incomplete types nor sizes that are not
10056 fixed. */
10057 if (!COMPLETE_TYPE_P (type)
10058 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10059 return -1;
10061 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10062 if (count == -1
10063 || !index
10064 || !TYPE_MAX_VALUE (index)
10065 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10066 || !TYPE_MIN_VALUE (index)
10067 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10068 || count < 0)
10069 return -1;
10071 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10072 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10074 /* There must be no padding. */
10075 if (wi::to_wide (TYPE_SIZE (type))
10076 != count * GET_MODE_BITSIZE (*modep))
10077 return -1;
10079 return count;
10082 case RECORD_TYPE:
10084 int count = 0;
10085 int sub_count;
10086 tree field;
10088 /* Can't handle incomplete types nor sizes that are not
10089 fixed. */
10090 if (!COMPLETE_TYPE_P (type)
10091 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10092 return -1;
10094 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10096 if (TREE_CODE (field) != FIELD_DECL)
10097 continue;
10099 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10100 if (sub_count < 0)
10101 return -1;
10102 count += sub_count;
10105 /* There must be no padding. */
10106 if (wi::to_wide (TYPE_SIZE (type))
10107 != count * GET_MODE_BITSIZE (*modep))
10108 return -1;
10110 return count;
10113 case UNION_TYPE:
10114 case QUAL_UNION_TYPE:
10116 /* These aren't very interesting except in a degenerate case. */
10117 int count = 0;
10118 int sub_count;
10119 tree field;
10121 /* Can't handle incomplete types nor sizes that are not
10122 fixed. */
10123 if (!COMPLETE_TYPE_P (type)
10124 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10125 return -1;
10127 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10129 if (TREE_CODE (field) != FIELD_DECL)
10130 continue;
10132 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10133 if (sub_count < 0)
10134 return -1;
10135 count = count > sub_count ? count : sub_count;
10138 /* There must be no padding. */
10139 if (wi::to_wide (TYPE_SIZE (type))
10140 != count * GET_MODE_BITSIZE (*modep))
10141 return -1;
10143 return count;
10146 default:
10147 break;
10150 return -1;
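/* Worked examples (illustrative): struct { double re, im; } yields
   *MODEP = DFmode and a count of 2; an array double[3] yields DFmode and
   a count of 3; struct { double d; int i; } yields -1, since the int
   field does not match the floating-point element mode.  */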
10153 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10154 float or vector aggregate that shall be passed in FP/vector registers
10155 according to the ELFv2 ABI, return the homogeneous element mode in
10156 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10158 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10160 static bool
10161 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10162 machine_mode *elt_mode,
10163 int *n_elts)
10165 /* Note that we do not accept complex types at the top level as
10166 homogeneous aggregates; these types are handled via the
10167 targetm.calls.split_complex_arg mechanism. Complex types
10168 can be elements of homogeneous aggregates, however. */
10169 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10170 && AGGREGATE_TYPE_P (type))
10172 machine_mode field_mode = VOIDmode;
10173 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10175 if (field_count > 0)
10177 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10178 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10180 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10181 up to AGGR_ARG_NUM_REG registers. */
10182 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10184 if (elt_mode)
10185 *elt_mode = field_mode;
10186 if (n_elts)
10187 *n_elts = field_count;
10188 return true;
10193 if (elt_mode)
10194 *elt_mode = mode;
10195 if (n_elts)
10196 *n_elts = 1;
10197 return false;
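/* For example (illustrative), under the ELFv2 ABI
   struct { double a, b, c; } is a homogeneous aggregate with
   *ELT_MODE = DFmode and *N_ELTS = 3, so it can be passed in three FPRs;
   an aggregate whose elements would need more than AGGR_ARG_NUM_REG
   registers falls back to the normal aggregate rules.  */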
10200 /* Return a nonzero value to say to return the function value in
10201 memory, just as large structures are always returned. TYPE will be
10202 the data type of the value, and FNTYPE will be the type of the
10203 function doing the returning, or @code{NULL} for libcalls.
10205 The AIX ABI for the RS/6000 specifies that all structures are
10206 returned in memory. The Darwin ABI does the same.
10208 For the Darwin 64 Bit ABI, a function result can be returned in
10209 registers or in memory, depending on the size of the return data
10210 type. If it is returned in registers, the value occupies the same
10211 registers as it would if it were the first and only function
10212 argument. Otherwise, the function places its result in memory at
10213 the location pointed to by GPR3.
10215 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10216 but a draft put them in memory, and GCC used to implement the draft
10217 instead of the final standard. Therefore, aix_struct_return
10218 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10219 compatibility can change DRAFT_V4_STRUCT_RET to override the
10220 default, and -m switches get the final word. See
10221 rs6000_option_override_internal for more details.
10223 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10224 long double support is enabled. These values are returned in memory.
10226 int_size_in_bytes returns -1 for variable size objects, which go in
10227 memory always. The cast to unsigned makes -1 > 8. */
10229 static bool
10230 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10232 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10233 if (TARGET_MACHO
10234 && rs6000_darwin64_abi
10235 && TREE_CODE (type) == RECORD_TYPE
10236 && int_size_in_bytes (type) > 0)
10238 CUMULATIVE_ARGS valcum;
10239 rtx valret;
10241 valcum.words = 0;
10242 valcum.fregno = FP_ARG_MIN_REG;
10243 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10244 /* Do a trial code generation as if this were going to be passed
10245 as an argument; if any part goes in memory, we return NULL. */
10246 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10247 if (valret)
10248 return false;
10249 /* Otherwise fall through to more conventional ABI rules. */
10252 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10253 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10254 NULL, NULL))
10255 return false;
10257 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10258 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10259 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10260 return false;
10262 if (AGGREGATE_TYPE_P (type)
10263 && (aix_struct_return
10264 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10265 return true;
10267 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10268 modes only exist for GCC vector types if -maltivec. */
10269 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10270 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10271 return false;
10273 /* Return synthetic vectors in memory. */
10274 if (TREE_CODE (type) == VECTOR_TYPE
10275 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10277 static bool warned_for_return_big_vectors = false;
10278 if (!warned_for_return_big_vectors)
10280 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10281 "non-standard ABI extension with no compatibility "
10282 "guarantee");
10283 warned_for_return_big_vectors = true;
10285 return true;
10288 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10289 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10290 return true;
10292 return false;
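/* Illustrative consequences of the rules above: under ELFv2,
   struct { double a, b; } is returned in FPRs (a homogeneous aggregate)
   and struct { char c[16]; } in GPRs (at most 16 bytes), while with
   aix_struct_return set both are returned in memory.  */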
10295 /* Specify whether values returned in registers should be at the most
10296 significant end of a register. We want aggregates returned by
10297 value to match the way aggregates are passed to functions. */
10299 static bool
10300 rs6000_return_in_msb (const_tree valtype)
10302 return (DEFAULT_ABI == ABI_ELFv2
10303 && BYTES_BIG_ENDIAN
10304 && AGGREGATE_TYPE_P (valtype)
10305 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10306 == PAD_UPWARD));
10309 #ifdef HAVE_AS_GNU_ATTRIBUTE
10310 /* Return TRUE if a call to function FNDECL may be one that
10311 potentially affects the function calling ABI of the object file. */
10313 static bool
10314 call_ABI_of_interest (tree fndecl)
10316 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10318 struct cgraph_node *c_node;
10320 /* Libcalls are always interesting. */
10321 if (fndecl == NULL_TREE)
10322 return true;
10324 /* Any call to an external function is interesting. */
10325 if (DECL_EXTERNAL (fndecl))
10326 return true;
10328 /* Interesting functions that we are emitting in this object file. */
10329 c_node = cgraph_node::get (fndecl);
10330 c_node = c_node->ultimate_alias_target ();
10331 return !c_node->only_called_directly_p ();
10333 return false;
10335 #endif
10337 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10338 for a call to a function whose data type is FNTYPE.
10339 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10341 For incoming args we set the number of arguments in the prototype large
10342 so we never return a PARALLEL. */
10344 void
10345 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10346 rtx libname ATTRIBUTE_UNUSED, int incoming,
10347 int libcall, int n_named_args,
10348 tree fndecl,
10349 machine_mode return_mode ATTRIBUTE_UNUSED)
10351 static CUMULATIVE_ARGS zero_cumulative;
10353 *cum = zero_cumulative;
10354 cum->words = 0;
10355 cum->fregno = FP_ARG_MIN_REG;
10356 cum->vregno = ALTIVEC_ARG_MIN_REG;
10357 cum->prototype = (fntype && prototype_p (fntype));
10358 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10359 ? CALL_LIBCALL : CALL_NORMAL);
10360 cum->sysv_gregno = GP_ARG_MIN_REG;
10361 cum->stdarg = stdarg_p (fntype);
10362 cum->libcall = libcall;
10364 cum->nargs_prototype = 0;
10365 if (incoming || cum->prototype)
10366 cum->nargs_prototype = n_named_args;
10368 /* Check for a longcall attribute. */
10369 if ((!fntype && rs6000_default_long_calls)
10370 || (fntype
10371 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10372 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10373 cum->call_cookie |= CALL_LONG;
10374 else if (DEFAULT_ABI != ABI_DARWIN)
10376 bool is_local = (fndecl
10377 && !DECL_EXTERNAL (fndecl)
10378 && !DECL_WEAK (fndecl)
10379 && (*targetm.binds_local_p) (fndecl));
10380 if (is_local)
10382 else if (flag_plt)
10384 if (fntype
10385 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10386 cum->call_cookie |= CALL_LONG;
10388 else
10390 if (!(fntype
10391 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10392 cum->call_cookie |= CALL_LONG;
10396 if (TARGET_DEBUG_ARG)
10398 fprintf (stderr, "\ninit_cumulative_args:");
10399 if (fntype)
10401 tree ret_type = TREE_TYPE (fntype);
10402 fprintf (stderr, " ret code = %s,",
10403 get_tree_code_name (TREE_CODE (ret_type)));
10406 if (cum->call_cookie & CALL_LONG)
10407 fprintf (stderr, " longcall,");
10409 fprintf (stderr, " proto = %d, nargs = %d\n",
10410 cum->prototype, cum->nargs_prototype);
10413 #ifdef HAVE_AS_GNU_ATTRIBUTE
10414 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10416 cum->escapes = call_ABI_of_interest (fndecl);
10417 if (cum->escapes)
10419 tree return_type;
10421 if (fntype)
10423 return_type = TREE_TYPE (fntype);
10424 return_mode = TYPE_MODE (return_type);
10426 else
10427 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10429 if (return_type != NULL)
10431 if (TREE_CODE (return_type) == RECORD_TYPE
10432 && TYPE_TRANSPARENT_AGGR (return_type))
10434 return_type = TREE_TYPE (first_field (return_type));
10435 return_mode = TYPE_MODE (return_type);
10437 if (AGGREGATE_TYPE_P (return_type)
10438 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10439 <= 8))
10440 rs6000_returns_struct = true;
10442 if (SCALAR_FLOAT_MODE_P (return_mode))
10444 rs6000_passes_float = true;
10445 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10446 && (FLOAT128_IBM_P (return_mode)
10447 || FLOAT128_IEEE_P (return_mode)
10448 || (return_type != NULL
10449 && (TYPE_MAIN_VARIANT (return_type)
10450 == long_double_type_node))))
10451 rs6000_passes_long_double = true;
10453 /* Note if we pass or return an IEEE 128-bit type. We changed
10454 the mangling for these types, and we may need to make an alias
10455 with the old mangling. */
10456 if (FLOAT128_IEEE_P (return_mode))
10457 rs6000_passes_ieee128 = true;
10459 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10460 rs6000_passes_vector = true;
10463 #endif
10465 if (fntype
10466 && !TARGET_ALTIVEC
10467 && TARGET_ALTIVEC_ABI
10468 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10470 error ("cannot return value in vector register because"
10471 " altivec instructions are disabled, use %qs"
10472 " to enable them", "-maltivec");
10476 /* The mode the ABI uses for a word. This is not the same as word_mode
10477 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10479 static scalar_int_mode
10480 rs6000_abi_word_mode (void)
10482 return TARGET_32BIT ? SImode : DImode;
10485 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10486 static char *
10487 rs6000_offload_options (void)
10489 if (TARGET_64BIT)
10490 return xstrdup ("-foffload-abi=lp64");
10491 else
10492 return xstrdup ("-foffload-abi=ilp32");
10495 /* On rs6000, function arguments are promoted, as are function return
10496 values. */
10498 static machine_mode
10499 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10500 machine_mode mode,
10501 int *punsignedp ATTRIBUTE_UNUSED,
10502 const_tree, int)
10504 PROMOTE_MODE (mode, *punsignedp, type);
10506 return mode;
10509 /* Return true if TYPE must be passed on the stack and not in registers. */
10511 static bool
10512 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10514 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10515 return must_pass_in_stack_var_size (mode, type);
10516 else
10517 return must_pass_in_stack_var_size_or_pad (mode, type);
10520 static inline bool
10521 is_complex_IBM_long_double (machine_mode mode)
10523 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10526 /* Whether ABI_V4 passes MODE args to a function in floating point
10527 registers. */
10529 static bool
10530 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10532 if (!TARGET_HARD_FLOAT)
10533 return false;
10534 if (mode == DFmode)
10535 return true;
10536 if (mode == SFmode && named)
10537 return true;
10538 /* ABI_V4 passes complex IBM long double in 8 gprs.
10539 Stupid, but we can't change the ABI now. */
10540 if (is_complex_IBM_long_double (mode))
10541 return false;
10542 if (FLOAT128_2REG_P (mode))
10543 return true;
10544 if (DECIMAL_FLOAT_MODE_P (mode))
10545 return true;
10546 return false;
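/* For example (illustrative): a named double goes in an FPR, an unnamed
   SFmode vararg falls through to the GPRs, and complex IBM long double
   (ICmode/TCmode) always goes in GPRs, as the comment above notes.  */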
10549 /* Implement TARGET_FUNCTION_ARG_PADDING.
10551 For the AIX ABI structs are always stored left shifted in their
10552 argument slot. */
10554 static pad_direction
10555 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10557 #ifndef AGGREGATE_PADDING_FIXED
10558 #define AGGREGATE_PADDING_FIXED 0
10559 #endif
10560 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10561 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10562 #endif
10564 if (!AGGREGATE_PADDING_FIXED)
10566 /* GCC used to pass structures of the same size as integer types as
10567 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10568 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10569 passed padded downward, except that -mstrict-align further
10570 muddied the water in that multi-component structures of 2 and 4
10571 bytes in size were passed padded upward.
10573 The following arranges for best compatibility with previous
10574 versions of gcc, but removes the -mstrict-align dependency. */
10575 if (BYTES_BIG_ENDIAN)
10577 HOST_WIDE_INT size = 0;
10579 if (mode == BLKmode)
10581 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10582 size = int_size_in_bytes (type);
10584 else
10585 size = GET_MODE_SIZE (mode);
10587 if (size == 1 || size == 2 || size == 4)
10588 return PAD_DOWNWARD;
10590 return PAD_UPWARD;
10593 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10595 if (type != 0 && AGGREGATE_TYPE_P (type))
10596 return PAD_UPWARD;
10599 /* Fall back to the default. */
10600 return default_function_arg_padding (mode, type);
10603 /* If defined, a C expression that gives the alignment boundary, in bits,
10604 of an argument with the specified mode and type. If it is not defined,
10605 PARM_BOUNDARY is used for all arguments.
10607 V.4 wants long longs and doubles to be double word aligned. Just
10608 testing the mode size is a boneheaded way to do this as it means
10609 that other types such as complex int are also double word aligned.
10610 However, we're stuck with this because changing the ABI might break
10611 existing library interfaces.
10613 Quadword align Altivec/VSX vectors.
10614 Quadword align large synthetic vector types. */
10616 static unsigned int
10617 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10619 machine_mode elt_mode;
10620 int n_elts;
10622 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10624 if (DEFAULT_ABI == ABI_V4
10625 && (GET_MODE_SIZE (mode) == 8
10626 || (TARGET_HARD_FLOAT
10627 && !is_complex_IBM_long_double (mode)
10628 && FLOAT128_2REG_P (mode))))
10629 return 64;
10630 else if (FLOAT128_VECTOR_P (mode))
10631 return 128;
10632 else if (type && TREE_CODE (type) == VECTOR_TYPE
10633 && int_size_in_bytes (type) >= 8
10634 && int_size_in_bytes (type) < 16)
10635 return 64;
10636 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10637 || (type && TREE_CODE (type) == VECTOR_TYPE
10638 && int_size_in_bytes (type) >= 16))
10639 return 128;
10641 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10642 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10643 -mcompat-align-parm is used. */
10644 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10645 || DEFAULT_ABI == ABI_ELFv2)
10646 && type && TYPE_ALIGN (type) > 64)
10648 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10649 or homogeneous float/vector aggregates here. We already handled
10650 vector aggregates above, but still need to check for float here. */
10651 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10652 && !SCALAR_FLOAT_MODE_P (elt_mode));
10654 /* We used to check for BLKmode instead of the above aggregate type
10655 check. Warn when this results in any difference to the ABI. */
10656 if (aggregate_p != (mode == BLKmode))
10658 static bool warned;
10659 if (!warned && warn_psabi)
10661 warned = true;
10662 inform (input_location,
10663 "the ABI of passing aggregates with %d-byte alignment"
10664 " has changed in GCC 5",
10665 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10669 if (aggregate_p)
10670 return 128;
10673 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10674 implement the "aggregate type" check as a BLKmode check here; this
10675 means certain aggregate types are in fact not aligned. */
10676 if (TARGET_MACHO && rs6000_darwin64_abi
10677 && mode == BLKmode
10678 && type && TYPE_ALIGN (type) > 64)
10679 return 128;
10681 return PARM_BOUNDARY;
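/* Illustrative results of the above: under ABI_V4 an 8-byte long long or
   double gets 64-bit alignment; any Altivec/VSX vector mode gets 128;
   most other scalars fall through to PARM_BOUNDARY.  */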
10684 /* The offset in words to the start of the parameter save area. */
10686 static unsigned int
10687 rs6000_parm_offset (void)
10689 return (DEFAULT_ABI == ABI_V4 ? 2
10690 : DEFAULT_ABI == ABI_ELFv2 ? 4
10691 : 6);
10694 /* For a function parm of MODE and TYPE, return the starting word in
10695 the parameter area. NWORDS of the parameter area are already used. */
10697 static unsigned int
10698 rs6000_parm_start (machine_mode mode, const_tree type,
10699 unsigned int nwords)
10701 unsigned int align;
10703 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10704 return nwords + (-(rs6000_parm_offset () + nwords) & align);
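/* A worked example (illustrative): under ELFv2 (parameter offset of 4
   words) in 64-bit mode, with NWORDS = 3 already used, a 128-bit-aligned
   vector argument has ALIGN = 128 / 64 - 1 = 1, so it starts at word
   3 + (-(4 + 3) & 1) = 4, i.e. at a 16-byte-aligned slot.  */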
10707 /* Compute the size (in words) of a function argument. */
10709 static unsigned long
10710 rs6000_arg_size (machine_mode mode, const_tree type)
10712 unsigned long size;
10714 if (mode != BLKmode)
10715 size = GET_MODE_SIZE (mode);
10716 else
10717 size = int_size_in_bytes (type);
10719 if (TARGET_32BIT)
10720 return (size + 3) >> 2;
10721 else
10722 return (size + 7) >> 3;
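/* For example (illustrative): a BLKmode struct of 10 bytes occupies
   (10 + 7) >> 3 = 2 doublewords in 64-bit mode and
   (10 + 3) >> 2 = 3 words in 32-bit mode.  */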
10725 /* Use this to flush pending int fields. */
10727 static void
10728 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10729 HOST_WIDE_INT bitpos, int final)
10731 unsigned int startbit, endbit;
10732 int intregs, intoffset;
10734 /* Handle the situations where a float is taking up the first half
10735 of the GPR, and the other half is empty (typically due to
10736 alignment restrictions). We can detect this by an 8-byte-aligned
10737 int field, or by seeing that this is the final flush for this
10738 argument. Count the word and continue on. */
10739 if (cum->floats_in_gpr == 1
10740 && (cum->intoffset % 64 == 0
10741 || (cum->intoffset == -1 && final)))
10743 cum->words++;
10744 cum->floats_in_gpr = 0;
10747 if (cum->intoffset == -1)
10748 return;
10750 intoffset = cum->intoffset;
10751 cum->intoffset = -1;
10752 cum->floats_in_gpr = 0;
10754 if (intoffset % BITS_PER_WORD != 0)
10756 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10757 if (!int_mode_for_size (bits, 0).exists ())
10759 /* We couldn't find an appropriate mode, which happens,
10760 e.g., in packed structs when there are 3 bytes to load.
10761 Move intoffset back to the beginning of the word in this
10762 case. */
10763 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10767 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10768 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10769 intregs = (endbit - startbit) / BITS_PER_WORD;
10770 cum->words += intregs;
10771 /* words should be unsigned. */
10772 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10774 int pad = (endbit/BITS_PER_WORD) - cum->words;
10775 cum->words += pad;
10779 /* The darwin64 ABI calls for us to recurse down through structs,
10780 looking for elements passed in registers. Unfortunately, we have
10781 to track the int register count here as well because of misalignments
10782 in powerpc alignment mode. */
10784 static void
10785 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10786 const_tree type,
10787 HOST_WIDE_INT startbitpos)
10789 tree f;
10791 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10792 if (TREE_CODE (f) == FIELD_DECL)
10794 HOST_WIDE_INT bitpos = startbitpos;
10795 tree ftype = TREE_TYPE (f);
10796 machine_mode mode;
10797 if (ftype == error_mark_node)
10798 continue;
10799 mode = TYPE_MODE (ftype);
10801 if (DECL_SIZE (f) != 0
10802 && tree_fits_uhwi_p (bit_position (f)))
10803 bitpos += int_bit_position (f);
10805 /* ??? FIXME: else assume zero offset. */
10807 if (TREE_CODE (ftype) == RECORD_TYPE)
10808 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10809 else if (USE_FP_FOR_ARG_P (cum, mode))
10811 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10812 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10813 cum->fregno += n_fpregs;
10814 /* Single-precision floats present a special problem for
10815 us, because they are smaller than an 8-byte GPR, and so
10816 the structure-packing rules combined with the standard
10817 varargs behavior mean that we want to pack float/float
10818 and float/int combinations into a single register's
10819 space. This is complicated by the arg advance flushing,
10820 which works on arbitrarily large groups of int-type
10821 fields. */
10822 if (mode == SFmode)
10824 if (cum->floats_in_gpr == 1)
10826 /* Two floats in a word; count the word and reset
10827 the float count. */
10828 cum->words++;
10829 cum->floats_in_gpr = 0;
10831 else if (bitpos % 64 == 0)
10833 /* A float at the beginning of an 8-byte word;
10834 count it and put off adjusting cum->words until
10835 we see if an arg advance flush is going to do it
10836 for us. */
10837 cum->floats_in_gpr++;
10839 else
10841 /* The float is at the end of a word, preceded
10842 by integer fields, so the arg advance flush
10843 just above has already set cum->words and
10844 everything is taken care of. */
10847 else
10848 cum->words += n_fpregs;
10850 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10852 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10853 cum->vregno++;
10854 cum->words += 2;
10856 else if (cum->intoffset == -1)
10857 cum->intoffset = bitpos;
10861 /* Check for an item that needs to be considered specially under the
10862 Darwin 64-bit ABI. These are record types where the mode is BLKmode
10863 or the structure is 8 bytes in size. */
10864 static int
10865 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10867 return rs6000_darwin64_abi
10868 && ((mode == BLKmode
10869 && TREE_CODE (type) == RECORD_TYPE
10870 && int_size_in_bytes (type) > 0)
10871 || (type && TREE_CODE (type) == RECORD_TYPE
10872 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10875 /* Update the data in CUM to advance over an argument
10876 of mode MODE and data type TYPE.
10877 (TYPE is null for libcalls where that information may not be available.)
10879 Note that for args passed by reference, function_arg will be called
10880 with MODE and TYPE set to that of the pointer to the arg, not the arg
10881 itself. */
10883 static void
10884 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10885 const_tree type, bool named, int depth)
10887 machine_mode elt_mode;
10888 int n_elts;
10890 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10892 /* Only tick off an argument if we're not recursing. */
10893 if (depth == 0)
10894 cum->nargs_prototype--;
10896 #ifdef HAVE_AS_GNU_ATTRIBUTE
10897 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
10898 && cum->escapes)
10900 if (SCALAR_FLOAT_MODE_P (mode))
10902 rs6000_passes_float = true;
10903 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10904 && (FLOAT128_IBM_P (mode)
10905 || FLOAT128_IEEE_P (mode)
10906 || (type != NULL
10907 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
10908 rs6000_passes_long_double = true;
10910 /* Note if we pass or return an IEEE 128-bit type. We changed the
10911 mangling for these types, and we may need to make an alias with
10912 the old mangling. */
10913 if (FLOAT128_IEEE_P (mode))
10914 rs6000_passes_ieee128 = true;
10916 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10917 rs6000_passes_vector = true;
10919 #endif
10921 if (TARGET_ALTIVEC_ABI
10922 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10923 || (type && TREE_CODE (type) == VECTOR_TYPE
10924 && int_size_in_bytes (type) == 16)))
10926 bool stack = false;
10928 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10930 cum->vregno += n_elts;
10932 if (!TARGET_ALTIVEC)
10933 error ("cannot pass argument in vector register because"
10934 " altivec instructions are disabled, use %qs"
10935 " to enable them", "-maltivec");
10937 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10938 even if it is going to be passed in a vector register.
10939 Darwin does the same for variable-argument functions. */
10940 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10941 && TARGET_64BIT)
10942 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10943 stack = true;
10945 else
10946 stack = true;
10948 if (stack)
10950 int align;
10952 /* Vector parameters must be 16-byte aligned. In 32-bit
10953 mode this means we need to take into account the offset
10954 to the parameter save area. In 64-bit mode, they just
10955 have to start on an even word, since the parameter save
10956 area is 16-byte aligned. */
10957 if (TARGET_32BIT)
10958 align = -(rs6000_parm_offset () + cum->words) & 3;
10959 else
10960 align = cum->words & 1;
10961 cum->words += align + rs6000_arg_size (mode, type);
10963 if (TARGET_DEBUG_ARG)
10965 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
10966 cum->words, align);
10967 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
10968 cum->nargs_prototype, cum->prototype,
10969 GET_MODE_NAME (mode));
10973 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10975 int size = int_size_in_bytes (type);
10976 /* Variable sized types have size == -1 and are
10977 treated as if consisting entirely of ints.
10978 Pad to 16 byte boundary if needed. */
10979 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10980 && (cum->words % 2) != 0)
10981 cum->words++;
10982 /* For varargs, we can just go up by the size of the struct. */
10983 if (!named)
10984 cum->words += (size + 7) / 8;
10985 else
10987 /* It is tempting to say int register count just goes up by
10988 sizeof(type)/8, but this is wrong in a case such as
10989 { int; double; int; } [powerpc alignment]. We have to
10990 grovel through the fields for these too. */
10991 cum->intoffset = 0;
10992 cum->floats_in_gpr = 0;
10993 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
10994 rs6000_darwin64_record_arg_advance_flush (cum,
10995 size * BITS_PER_UNIT, 1);
10997 if (TARGET_DEBUG_ARG)
10999 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11000 cum->words, TYPE_ALIGN (type), size);
11001 fprintf (stderr,
11002 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11003 cum->nargs_prototype, cum->prototype,
11004 GET_MODE_NAME (mode));
11007 else if (DEFAULT_ABI == ABI_V4)
11009 if (abi_v4_pass_in_fpr (mode, named))
11011 /* _Decimal128 must use an even/odd register pair. This assumes
11012 that the register number is odd when fregno is odd. */
11013 if (mode == TDmode && (cum->fregno % 2) == 1)
11014 cum->fregno++;
11016 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11017 <= FP_ARG_V4_MAX_REG)
11018 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11019 else
11021 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11022 if (mode == DFmode || FLOAT128_IBM_P (mode)
11023 || mode == DDmode || mode == TDmode)
11024 cum->words += cum->words & 1;
11025 cum->words += rs6000_arg_size (mode, type);
11028 else
11030 int n_words = rs6000_arg_size (mode, type);
11031 int gregno = cum->sysv_gregno;
11033 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11034 As does any other 2 word item such as complex int due to a
11035 historical mistake. */
11036 if (n_words == 2)
11037 gregno += (1 - gregno) & 1;
11039 /* Multi-reg args are not split between registers and stack. */
11040 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11042 /* Long long is aligned on the stack. So are other 2 word
11043 items such as complex int due to a historical mistake. */
11044 if (n_words == 2)
11045 cum->words += cum->words & 1;
11046 cum->words += n_words;
11049 /* Note: continuing to accumulate gregno past the point where we
11050 start spilling to the stack lets expand_builtin_saveregs see
11051 that spilling has begun. */
11052 cum->sysv_gregno = gregno + n_words;
11055 if (TARGET_DEBUG_ARG)
11057 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11058 cum->words, cum->fregno);
11059 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11060 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11061 fprintf (stderr, "mode = %4s, named = %d\n",
11062 GET_MODE_NAME (mode), named);
11065 else
11067 int n_words = rs6000_arg_size (mode, type);
11068 int start_words = cum->words;
11069 int align_words = rs6000_parm_start (mode, type, start_words);
11071 cum->words = align_words + n_words;
11073 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11075 /* _Decimal128 must be passed in an even/odd float register pair.
11076 This assumes that the register number is odd when fregno is
11077 odd. */
11078 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11079 cum->fregno++;
11080 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11083 if (TARGET_DEBUG_ARG)
11085 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11086 cum->words, cum->fregno);
11087 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11088 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11089 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11090 named, align_words - start_words, depth);
11095 static void
11096 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11097 const_tree type, bool named)
11099 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
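/* Worked example (illustrative only, not part of the build): the
   16-byte alignment computation used above for a vector argument,
   assuming a 32-bit target where rs6000_parm_offset () returns 2 (as
   for the V.4 ABI) and three parameter words are already in use.  */
#if 0
  int words = 3;		/* cum->words on entry */
  int align = -(2 + 3) & 3;	/* == 3 padding words */
  words += align + 4;		/* a 16-byte vector occupies 4 words */
  /* The vector starts at parameter word 6, i.e. at byte offset
     (2 + 6) * 4 == 32 within the frame, a 16-byte boundary.  */
#endif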
11103 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11104 structure between cum->intoffset and bitpos to integer registers. */
11106 static void
11107 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11108 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11110 machine_mode mode;
11111 unsigned int regno;
11112 unsigned int startbit, endbit;
11113 int this_regno, intregs, intoffset;
11114 rtx reg;
11116 if (cum->intoffset == -1)
11117 return;
11119 intoffset = cum->intoffset;
11120 cum->intoffset = -1;
11122 /* If this is the trailing part of a word, try to load only that
11123 much into the register. Otherwise load the whole register. Note
11124 that in the latter case we may pick up unwanted bits. It's not a
11125 problem at the moment but we may wish to revisit this. */
11127 if (intoffset % BITS_PER_WORD != 0)
11129 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11130 if (!int_mode_for_size (bits, 0).exists (&mode))
11132 /* We couldn't find an appropriate mode, which happens,
11133 e.g., in packed structs when there are 3 bytes to load.
11134 Move intoffset back to the beginning of the word in this
11135 case. */
11136 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11137 mode = word_mode;
11140 else
11141 mode = word_mode;
11143 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11144 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11145 intregs = (endbit - startbit) / BITS_PER_WORD;
11146 this_regno = cum->words + intoffset / BITS_PER_WORD;
11148 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11149 cum->use_stack = 1;
11151 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11152 if (intregs <= 0)
11153 return;
11155 intoffset /= BITS_PER_UNIT;
11158 regno = GP_ARG_MIN_REG + this_regno;
11159 reg = gen_rtx_REG (mode, regno);
11160 rvec[(*k)++] =
11161 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11163 this_regno += 1;
11164 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
11165 mode = word_mode;
11166 intregs -= 1;
11168 while (intregs > 0);
11171 /* Recursive workhorse for the following. */
11173 static void
11174 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11175 HOST_WIDE_INT startbitpos, rtx rvec[],
11176 int *k)
11178 tree f;
11180 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11181 if (TREE_CODE (f) == FIELD_DECL)
11183 HOST_WIDE_INT bitpos = startbitpos;
11184 tree ftype = TREE_TYPE (f);
11185 machine_mode mode;
11186 if (ftype == error_mark_node)
11187 continue;
11188 mode = TYPE_MODE (ftype);
11190 if (DECL_SIZE (f) != 0
11191 && tree_fits_uhwi_p (bit_position (f)))
11192 bitpos += int_bit_position (f);
11194 /* ??? FIXME: else assume zero offset. */
11196 if (TREE_CODE (ftype) == RECORD_TYPE)
11197 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11198 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11200 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11201 #if 0
11202 switch (mode)
11204 case E_SCmode: mode = SFmode; break;
11205 case E_DCmode: mode = DFmode; break;
11206 case E_TCmode: mode = TFmode; break;
11207 default: break;
11209 #endif
11210 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11211 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11213 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11214 && (mode == TFmode || mode == TDmode));
11215 /* Long double or _Decimal128 split over regs and memory. */
11216 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11217 cum->use_stack = 1;
11219 rvec[(*k)++]
11220 = gen_rtx_EXPR_LIST (VOIDmode,
11221 gen_rtx_REG (mode, cum->fregno++),
11222 GEN_INT (bitpos / BITS_PER_UNIT));
11223 if (FLOAT128_2REG_P (mode))
11224 cum->fregno++;
11226 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11228 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11229 rvec[(*k)++]
11230 = gen_rtx_EXPR_LIST (VOIDmode,
11231 gen_rtx_REG (mode, cum->vregno++),
11232 GEN_INT (bitpos / BITS_PER_UNIT));
11234 else if (cum->intoffset == -1)
11235 cum->intoffset = bitpos;
11239 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11240 the register(s) to be used for each field and subfield of a struct
11241 being passed by value, along with the offset of where the
11242 register's value may be found in the block. FP fields go in FP
11243 registers, vector fields go in vector registers, and everything
11244 else goes in int registers, packed as in memory.
11246 This code is also used for function return values. RETVAL indicates
11247 whether this is the case.
11249 Much of this is taken from the SPARC V9 port, which has a similar
11250 calling convention. */
11252 static rtx
11253 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11254 bool named, bool retval)
11256 rtx rvec[FIRST_PSEUDO_REGISTER];
11257 int k = 1, kbase = 1;
11258 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11259 /* This is a copy; modifications are not visible to our caller. */
11260 CUMULATIVE_ARGS copy_cum = *orig_cum;
11261 CUMULATIVE_ARGS *cum = &copy_cum;
11263 /* Pad to 16 byte boundary if needed. */
11264 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11265 && (cum->words % 2) != 0)
11266 cum->words++;
11268 cum->intoffset = 0;
11269 cum->use_stack = 0;
11270 cum->named = named;
11272 /* Put entries into rvec[] for individual FP and vector fields, and
11273 for the chunks of memory that go in int regs. Note we start at
11274 element 1; 0 is reserved for an indication of using memory, and
11275 may or may not be filled in below. */
11276 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11277 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11279 /* If any part of the struct went on the stack put all of it there.
11280 This hack is because the generic code for
11281 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11282 parts of the struct are not at the beginning. */
11283 if (cum->use_stack)
11285 if (retval)
11286 return NULL_RTX; /* doesn't go in registers at all */
11287 kbase = 0;
11288 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11290 if (k > 1 || cum->use_stack)
11291 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11292 else
11293 return NULL_RTX;
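/* Illustrative sketch, not compiled: the PARALLEL the routine above
   builds for a darwin64 argument of type struct { double d; int i; },
   assuming f1 is the first free FPR (hard register 33 in this port's
   numbering) and the aggregate's GPR chunk lands in r4.  */
#if 0
  rtx v[2];
  v[0] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DFmode, 33),
			    GEN_INT (0));	/* d in f1 at byte 0 */
  v[1] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 4),
			    GEN_INT (8));	/* i plus padding in r4 at byte 8 */
  rtx arg = gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (2, v));
#endif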
11296 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11298 static rtx
11299 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11300 int align_words)
11302 int n_units;
11303 int i, k;
11304 rtx rvec[GP_ARG_NUM_REG + 1];
11306 if (align_words >= GP_ARG_NUM_REG)
11307 return NULL_RTX;
11309 n_units = rs6000_arg_size (mode, type);
11311 /* Optimize the simple case where the arg fits in one gpr, except in
11312 the case of BLKmode due to assign_parms assuming that registers are
11313 BITS_PER_WORD wide. */
11314 if (n_units == 0
11315 || (n_units == 1 && mode != BLKmode))
11316 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11318 k = 0;
11319 if (align_words + n_units > GP_ARG_NUM_REG)
11320 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11321 using a magic NULL_RTX component.
11322 This is not strictly correct. Only some of the arg belongs in
11323 memory, not all of it. However, the normal scheme using
11324 function_arg_partial_nregs can result in unusual subregs, eg.
11325 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11326 store the whole arg to memory is often more efficient than code
11327 to store pieces, and we know that space is available in the right
11328 place for the whole arg. */
11329 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11331 i = 0;
11334 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11335 rtx off = GEN_INT (i++ * 4);
11336 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11338 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11340 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
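/* Illustrative sketch, not compiled: the PARALLEL the routine above
   returns for an 8-byte argument when only r10 remains free
   (align_words == 7).  Element 0 is the magic NULL_RTX marker saying
   the argument also goes to memory; element 1 puts the first word in
   r10, and the second word lives only on the stack.  */
#if 0
  rtx v[2];
  v[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
  v[1] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (SImode, 10),
			    GEN_INT (0));
  rtx arg = gen_rtx_PARALLEL (DImode, gen_rtvec_v (2, v));
#endif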
11343 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11344 but must also be copied into the parameter save area starting at
11345 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11346 to the GPRs and/or memory. Return the number of elements used. */
11348 static int
11349 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11350 int align_words, rtx *rvec)
11352 int k = 0;
11354 if (align_words < GP_ARG_NUM_REG)
11356 int n_words = rs6000_arg_size (mode, type);
11358 if (align_words + n_words > GP_ARG_NUM_REG
11359 || mode == BLKmode
11360 || (TARGET_32BIT && TARGET_POWERPC64))
11362 /* If this is partially on the stack, then we only
11363 include the portion actually in registers here. */
11364 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11365 int i = 0;
11367 if (align_words + n_words > GP_ARG_NUM_REG)
11369 /* Not all of the arg fits in gprs. Say that it goes in memory
11370 too, using a magic NULL_RTX component. Also see comment in
11371 rs6000_mixed_function_arg for why the normal
11372 function_arg_partial_nregs scheme doesn't work in this case. */
11373 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11378 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11379 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11380 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11382 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11384 else
11386 /* The whole arg fits in gprs. */
11387 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11388 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11391 else
11393 /* It's entirely in memory. */
11394 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11397 return k;
11400 /* RVEC is a vector of K components of an argument of mode MODE.
11401 Construct the final function_arg return value from it. */
11403 static rtx
11404 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11406 gcc_assert (k >= 1);
11408 /* Avoid returning a PARALLEL in the trivial cases. */
11409 if (k == 1)
11411 if (XEXP (rvec[0], 0) == NULL_RTX)
11412 return NULL_RTX;
11414 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11415 return XEXP (rvec[0], 0);
11418 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11421 /* Determine where to put an argument to a function.
11422 Value is zero to push the argument on the stack,
11423 or a hard register in which to store the argument.
11425 MODE is the argument's machine mode.
11426 TYPE is the data type of the argument (as a tree).
11427 This is null for libcalls where that information may
11428 not be available.
11429 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11430 the preceding args and about the function being called. It is
11431 not modified in this routine.
11432 NAMED is nonzero if this argument is a named parameter
11433 (otherwise it is an extra parameter matching an ellipsis).
11435 On RS/6000 the first eight words of non-FP are normally in registers
11436 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11437 Under V.4, the first 8 FP args are in registers.
11439 If this is floating-point and no prototype is specified, we use
11440 both an FP and integer register (or possibly FP reg and stack). Library
11441 functions (when CALL_LIBCALL is set) always have the proper types for args,
11442 so we can pass the FP value just in one register. emit_library_function
11443 doesn't support PARALLEL anyway.
11445 Note that for args passed by reference, function_arg will be called
11446 with MODE and TYPE set to that of the pointer to the arg, not the arg
11447 itself. */
11449 static rtx
11450 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11451 const_tree type, bool named)
11453 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11454 enum rs6000_abi abi = DEFAULT_ABI;
11455 machine_mode elt_mode;
11456 int n_elts;
11458 /* Return a marker to indicate whether CR1 needs to set or clear the
11459 bit that V.4 uses to say fp args were passed in registers.
11460 Assume that we don't need the marker for software floating point,
11461 or compiler generated library calls. */
11462 if (mode == VOIDmode)
11464 if (abi == ABI_V4
11465 && (cum->call_cookie & CALL_LIBCALL) == 0
11466 && (cum->stdarg
11467 || (cum->nargs_prototype < 0
11468 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11469 && TARGET_HARD_FLOAT)
11470 return GEN_INT (cum->call_cookie
11471 | ((cum->fregno == FP_ARG_MIN_REG)
11472 ? CALL_V4_SET_FP_ARGS
11473 : CALL_V4_CLEAR_FP_ARGS));
11475 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11478 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11480 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11482 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11483 if (rslt != NULL_RTX)
11484 return rslt;
11485 /* Else fall through to usual handling. */
11488 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11490 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11491 rtx r, off;
11492 int i, k = 0;
11494 /* Do we also need to pass this argument in the parameter save area?
11495 Library support functions for IEEE 128-bit are assumed to not need the
11496 value passed both in GPRs and in vector registers. */
11497 if (TARGET_64BIT && !cum->prototype
11498 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11500 int align_words = ROUND_UP (cum->words, 2);
11501 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11504 /* Describe where this argument goes in the vector registers. */
11505 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11507 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11508 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11509 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11512 return rs6000_finish_function_arg (mode, rvec, k);
11514 else if (TARGET_ALTIVEC_ABI
11515 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11516 || (type && TREE_CODE (type) == VECTOR_TYPE
11517 && int_size_in_bytes (type) == 16)))
11519 if (named || abi == ABI_V4)
11520 return NULL_RTX;
11521 else
11523 /* Vector parameters to varargs functions under AIX or Darwin
11524 get passed in memory and possibly also in GPRs. */
11525 int align, align_words, n_words;
11526 machine_mode part_mode;
11528 /* Vector parameters must be 16-byte aligned. In 32-bit
11529 mode this means we need to take into account the offset
11530 to the parameter save area. In 64-bit mode, they just
11531 have to start on an even word, since the parameter save
11532 area is 16-byte aligned. */
11533 if (TARGET_32BIT)
11534 align = -(rs6000_parm_offset () + cum->words) & 3;
11535 else
11536 align = cum->words & 1;
11537 align_words = cum->words + align;
11539 /* Out of registers? Memory, then. */
11540 if (align_words >= GP_ARG_NUM_REG)
11541 return NULL_RTX;
11543 if (TARGET_32BIT && TARGET_POWERPC64)
11544 return rs6000_mixed_function_arg (mode, type, align_words);
11546 /* The vector value goes in GPRs. Only the part of the
11547 value in GPRs is reported here. */
11548 part_mode = mode;
11549 n_words = rs6000_arg_size (mode, type);
11550 if (align_words + n_words > GP_ARG_NUM_REG)
11551 /* Fortunately, there are only two possibilities, the value
11552 is either wholly in GPRs or half in GPRs and half not. */
11553 part_mode = DImode;
11555 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11559 else if (abi == ABI_V4)
11561 if (abi_v4_pass_in_fpr (mode, named))
11563 /* _Decimal128 must use an even/odd register pair. This assumes
11564 that the register number is odd when fregno is odd. */
11565 if (mode == TDmode && (cum->fregno % 2) == 1)
11566 cum->fregno++;
11568 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11569 <= FP_ARG_V4_MAX_REG)
11570 return gen_rtx_REG (mode, cum->fregno);
11571 else
11572 return NULL_RTX;
11574 else
11576 int n_words = rs6000_arg_size (mode, type);
11577 int gregno = cum->sysv_gregno;
11579 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10),
11580 as is any other 2-word item such as complex int, due to a
11581 historical mistake. */
11582 if (n_words == 2)
11583 gregno += (1 - gregno) & 1;
11585 /* Multi-reg args are not split between registers and stack. */
11586 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11587 return NULL_RTX;
11589 if (TARGET_32BIT && TARGET_POWERPC64)
11590 return rs6000_mixed_function_arg (mode, type,
11591 gregno - GP_ARG_MIN_REG);
11592 return gen_rtx_REG (mode, gregno);
11595 else
11597 int align_words = rs6000_parm_start (mode, type, cum->words);
11599 /* _Decimal128 must be passed in an even/odd float register pair.
11600 This assumes that the register number is odd when fregno is odd. */
11601 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11602 cum->fregno++;
11604 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11605 && !(TARGET_AIX && !TARGET_ELF
11606 && type != NULL && AGGREGATE_TYPE_P (type)))
11608 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11609 rtx r, off;
11610 int i, k = 0;
11611 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11612 int fpr_words;
11614 /* Do we also need to pass this argument in the parameter
11615 save area? */
11616 if (type && (cum->nargs_prototype <= 0
11617 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11618 && TARGET_XL_COMPAT
11619 && align_words >= GP_ARG_NUM_REG)))
11620 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11622 /* Describe where this argument goes in the fprs. */
11623 for (i = 0; i < n_elts
11624 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11626 /* Check if the argument is split over registers and memory.
11627 This can only ever happen for long double or _Decimal128;
11628 complex types are handled via split_complex_arg. */
11629 machine_mode fmode = elt_mode;
11630 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11632 gcc_assert (FLOAT128_2REG_P (fmode));
11633 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11636 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11637 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11638 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11641 /* If there were not enough FPRs to hold the argument, the rest
11642 usually goes into memory. However, if the current position
11643 is still within the register parameter area, a portion may
11644 actually have to go into GPRs.
11646 Note that it may happen that the portion of the argument
11647 passed in the first "half" of the first GPR was already
11648 passed in the last FPR as well.
11650 For unnamed arguments, we already set up GPRs to cover the
11651 whole argument in rs6000_psave_function_arg, so there is
11652 nothing further to do at this point. */
11653 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11654 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11655 && cum->nargs_prototype > 0)
11657 static bool warned;
11659 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11660 int n_words = rs6000_arg_size (mode, type);
11662 align_words += fpr_words;
11663 n_words -= fpr_words;
11667 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11668 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11669 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11671 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11673 if (!warned && warn_psabi)
11675 warned = true;
11676 inform (input_location,
11677 "the ABI of passing homogeneous %<float%> aggregates"
11678 " has changed in GCC 5");
11682 return rs6000_finish_function_arg (mode, rvec, k);
11684 else if (align_words < GP_ARG_NUM_REG)
11686 if (TARGET_32BIT && TARGET_POWERPC64)
11687 return rs6000_mixed_function_arg (mode, type, align_words);
11689 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11691 else
11692 return NULL_RTX;
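/* Illustrative caller-side sketch (the real consumer is the generic
   expand machinery): how the three possible return shapes of
   rs6000_function_arg are interpreted, given a cumulative_args_t
   CUM_V already set up via INIT_CUMULATIVE_ARGS.  */
#if 0
  rtx where = rs6000_function_arg (cum_v, DImode, NULL_TREE, true);
  if (where == NULL_RTX)
    ;		/* the argument goes wholly on the stack */
  else if (GET_CODE (where) == PARALLEL)
    ;		/* split across registers, or registers plus memory */
  else
    ;		/* a single hard register */
  rs6000_function_arg_advance (cum_v, DImode, NULL_TREE, true);
#endif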
11696 /* For an arg passed partly in registers and partly in memory, this is
11697 the number of bytes passed in registers. For args passed entirely in
11698 registers or entirely in memory, zero. When an arg is described by a
11699 PARALLEL, perhaps using more than one register type, this function
11700 returns the number of bytes used by the first element of the PARALLEL. */
11702 static int
11703 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11704 tree type, bool named)
11706 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11707 bool passed_in_gprs = true;
11708 int ret = 0;
11709 int align_words;
11710 machine_mode elt_mode;
11711 int n_elts;
11713 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11715 if (DEFAULT_ABI == ABI_V4)
11716 return 0;
11718 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11720 /* If we are passing this arg in the fixed parameter save area (gprs or
11721 memory) as well as VRs, we do not use the partial bytes mechanism;
11722 instead, rs6000_function_arg will return a PARALLEL including a memory
11723 element as necessary. Library support functions for IEEE 128-bit are
11724 assumed to not need the value passed both in GPRs and in vector
11725 registers. */
11726 if (TARGET_64BIT && !cum->prototype
11727 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11728 return 0;
11730 /* Otherwise, we pass in VRs only. Check for partial copies. */
11731 passed_in_gprs = false;
11732 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11733 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11736 /* In this complicated case we just disable the partial_nregs code. */
11737 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11738 return 0;
11740 align_words = rs6000_parm_start (mode, type, cum->words);
11742 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11743 && !(TARGET_AIX && !TARGET_ELF
11744 && type != NULL && AGGREGATE_TYPE_P (type)))
11746 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11748 /* If we are passing this arg in the fixed parameter save area
11749 (gprs or memory) as well as FPRs, we do not use the partial
11750 bytes mechanism; instead, rs6000_function_arg will return a
11751 PARALLEL including a memory element as necessary. */
11752 if (type
11753 && (cum->nargs_prototype <= 0
11754 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11755 && TARGET_XL_COMPAT
11756 && align_words >= GP_ARG_NUM_REG)))
11757 return 0;
11759 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11760 passed_in_gprs = false;
11761 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11763 /* Compute number of bytes / words passed in FPRs. If there
11764 is still space available in the register parameter area
11765 *after* that amount, a part of the argument will be passed
11766 in GPRs. In that case, the total amount passed in any
11767 registers is equal to the amount that would have been passed
11768 in GPRs if everything were passed there, so we fall back to
11769 the GPR code below to compute the appropriate value. */
11770 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11771 * MIN (8, GET_MODE_SIZE (elt_mode)));
11772 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11774 if (align_words + fpr_words < GP_ARG_NUM_REG)
11775 passed_in_gprs = true;
11776 else
11777 ret = fpr;
11781 if (passed_in_gprs
11782 && align_words < GP_ARG_NUM_REG
11783 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11784 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11786 if (ret != 0 && TARGET_DEBUG_ARG)
11787 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11789 return ret;
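/* Worked example (illustrative): a 24-byte aggregate passed in GPRs on
   a 64-bit target, starting at parameter word 6 (r9).  GP_ARG_NUM_REG
   is 8, so only r9 and r10 are free:

       ret = (GP_ARG_NUM_REG - align_words) * 8 = (8 - 6) * 8 = 16

   i.e. 16 bytes travel in registers and the final 8 bytes are placed
   in the parameter save area.  */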
11792 /* A C expression that indicates when an argument must be passed by
11793 reference. If nonzero for an argument, a copy of that argument is
11794 made in memory and a pointer to the argument is passed instead of
11795 the argument itself. The pointer is passed in whatever way is
11796 appropriate for passing a pointer to that type.
11798 Under V.4, aggregates and long double are passed by reference.
11800 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11801 reference unless the AltiVec vector extension ABI is in force.
11803 As an extension to all ABIs, variable sized types are passed by
11804 reference. */
11806 static bool
11807 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11808 machine_mode mode, const_tree type,
11809 bool named ATTRIBUTE_UNUSED)
11811 if (!type)
11812 return 0;
11814 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11815 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11817 if (TARGET_DEBUG_ARG)
11818 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11819 return 1;
11822 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11824 if (TARGET_DEBUG_ARG)
11825 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11826 return 1;
11829 if (int_size_in_bytes (type) < 0)
11831 if (TARGET_DEBUG_ARG)
11832 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11833 return 1;
11836 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11837 modes only exist for GCC vector types if -maltivec. */
11838 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11840 if (TARGET_DEBUG_ARG)
11841 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11842 return 1;
11845 /* Pass synthetic vectors in memory. */
11846 if (TREE_CODE (type) == VECTOR_TYPE
11847 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11849 static bool warned_for_pass_big_vectors = false;
11850 if (TARGET_DEBUG_ARG)
11851 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11852 if (!warned_for_pass_big_vectors)
11854 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11855 "non-standard ABI extension with no compatibility "
11856 "guarantee");
11857 warned_for_pass_big_vectors = true;
11859 return 1;
11862 return 0;
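/* Illustrative only: under the 32-bit V.4 ABI the first two parameter
   types below are passed by reference, the third by value.  */
#if 0
  struct big { char c[64]; };	/* aggregate: by reference under V.4 */
  long double ld;		/* by reference when long double is IEEE
				   128-bit (TARGET_IEEEQUAD) */
  double d;			/* scalar: by value, in an FPR */
#endif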
11865 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11866 already processed. Return true if the parameter must be passed
11867 (fully or partially) on the stack. */
11869 static bool
11870 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11872 machine_mode mode;
11873 int unsignedp;
11874 rtx entry_parm;
11876 /* Catch errors. */
11877 if (type == NULL || type == error_mark_node)
11878 return true;
11880 /* Handle types with no storage requirement. */
11881 if (TYPE_MODE (type) == VOIDmode)
11882 return false;
11884 /* Handle complex types: check (and advance past) both components. */
11885 if (TREE_CODE (type) == COMPLEX_TYPE)
11886 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11887 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11889 /* Handle transparent aggregates. */
11890 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11891 && TYPE_TRANSPARENT_AGGR (type))
11892 type = TREE_TYPE (first_field (type));
11894 /* See if this arg was passed by invisible reference. */
11895 if (pass_by_reference (get_cumulative_args (args_so_far),
11896 TYPE_MODE (type), type, true))
11897 type = build_pointer_type (type);
11899 /* Find mode as it is passed by the ABI. */
11900 unsignedp = TYPE_UNSIGNED (type);
11901 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11903 /* If we must pass in stack, we need a stack. */
11904 if (rs6000_must_pass_in_stack (mode, type))
11905 return true;
11907 /* If there is no incoming register, we need a stack. */
11908 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11909 if (entry_parm == NULL)
11910 return true;
11912 /* Likewise if we need to pass both in registers and on the stack. */
11913 if (GET_CODE (entry_parm) == PARALLEL
11914 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
11915 return true;
11917 /* Also true if we're partially in registers and partially not. */
11918 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
11919 return true;
11921 /* Update info on where next arg arrives in registers. */
11922 rs6000_function_arg_advance (args_so_far, mode, type, true);
11923 return false;
11926 /* Return true if FUN has no prototype, has a variable argument
11927 list, or passes any parameter in memory. */
11929 static bool
11930 rs6000_function_parms_need_stack (tree fun, bool incoming)
11932 tree fntype, result;
11933 CUMULATIVE_ARGS args_so_far_v;
11934 cumulative_args_t args_so_far;
11936 if (!fun)
11937 /* Must be a libcall, all of which only use reg parms. */
11938 return false;
11940 fntype = fun;
11941 if (!TYPE_P (fun))
11942 fntype = TREE_TYPE (fun);
11944 /* Varargs functions need the parameter save area. */
11945 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
11946 return true;
11948 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
11949 args_so_far = pack_cumulative_args (&args_so_far_v);
11951 /* When incoming, we will have been passed the function decl.
11952 It is necessary to use the decl to handle K&R style functions,
11953 where TYPE_ARG_TYPES may not be available. */
11954 if (incoming)
11956 gcc_assert (DECL_P (fun));
11957 result = DECL_RESULT (fun);
11959 else
11960 result = TREE_TYPE (fntype);
11962 if (result && aggregate_value_p (result, fntype))
11964 if (!TYPE_P (result))
11965 result = TREE_TYPE (result);
11966 result = build_pointer_type (result);
11967 rs6000_parm_needs_stack (args_so_far, result);
11970 if (incoming)
11972 tree parm;
11974 for (parm = DECL_ARGUMENTS (fun);
11975 parm && parm != void_list_node;
11976 parm = TREE_CHAIN (parm))
11977 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
11978 return true;
11980 else
11982 function_args_iterator args_iter;
11983 tree arg_type;
11985 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
11986 if (rs6000_parm_needs_stack (args_so_far, arg_type))
11987 return true;
11990 return false;
11993 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
11994 usually a constant depending on the ABI. However, in the ELFv2 ABI
11995 the register parameter area is optional when calling a function that
11996 has a prototype in scope, has no variable argument list, and passes
11997 all parameters in registers. */
12000 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12002 int reg_parm_stack_space;
12004 switch (DEFAULT_ABI)
12006 default:
12007 reg_parm_stack_space = 0;
12008 break;
12010 case ABI_AIX:
12011 case ABI_DARWIN:
12012 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12013 break;
12015 case ABI_ELFv2:
12016 /* ??? Recomputing this every time is a bit expensive. Is there
12017 a place to cache this information? */
12018 if (rs6000_function_parms_need_stack (fun, incoming))
12019 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12020 else
12021 reg_parm_stack_space = 0;
12022 break;
12025 return reg_parm_stack_space;
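/* Worked example (illustrative), 64-bit ELFv2: given
       extern int f (int, int);
   the prototype is in scope and both arguments travel in GPRs, so
   rs6000_reg_parm_stack_space returns 0.  Given
       extern int g (int, ...);
   the variable argument list forces the full 64-byte register
   parameter area.  */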
12028 static void
12029 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12031 int i;
12032 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12034 if (nregs == 0)
12035 return;
12037 for (i = 0; i < nregs; i++)
12039 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12040 if (reload_completed)
12042 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12043 tem = NULL_RTX;
12044 else
12045 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12046 i * GET_MODE_SIZE (reg_mode));
12048 else
12049 tem = replace_equiv_address (tem, XEXP (tem, 0));
12051 gcc_assert (tem);
12053 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12057 /* Perform any actions needed for a function that is receiving a
12058 variable number of arguments.
12060 CUM is as above.
12062 MODE and TYPE are the mode and type of the current parameter.
12064 PRETEND_SIZE is a variable that should be set to the amount of stack
12065 that must be pushed by the prolog to pretend that our caller pushed it.
12068 Normally, this macro will push all remaining incoming registers on the
12069 stack and set PRETEND_SIZE to the length of the registers pushed. */
12071 static void
12072 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12073 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12074 int no_rtl)
12076 CUMULATIVE_ARGS next_cum;
12077 int reg_size = TARGET_32BIT ? 4 : 8;
12078 rtx save_area = NULL_RTX, mem;
12079 int first_reg_offset;
12080 alias_set_type set;
12082 /* Skip the last named argument. */
12083 next_cum = *get_cumulative_args (cum);
12084 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12086 if (DEFAULT_ABI == ABI_V4)
12088 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12090 if (! no_rtl)
12092 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12093 HOST_WIDE_INT offset = 0;
12095 /* Try to optimize the size of the varargs save area.
12096 The ABI requires that ap.reg_save_area is doubleword
12097 aligned, but we don't need to allocate space for all
12098 the bytes, only those to which we actually will save
12099 anything. */
12100 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12101 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12102 if (TARGET_HARD_FLOAT
12103 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12104 && cfun->va_list_fpr_size)
12106 if (gpr_reg_num)
12107 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12108 * UNITS_PER_FP_WORD;
12109 if (cfun->va_list_fpr_size
12110 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12111 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12112 else
12113 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12114 * UNITS_PER_FP_WORD;
12116 if (gpr_reg_num)
12118 offset = -((first_reg_offset * reg_size) & ~7);
12119 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12121 gpr_reg_num = cfun->va_list_gpr_size;
12122 if (reg_size == 4 && (first_reg_offset & 1))
12123 gpr_reg_num++;
12125 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12127 else if (fpr_size)
12128 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12129 * UNITS_PER_FP_WORD
12130 - (int) (GP_ARG_NUM_REG * reg_size);
12132 if (gpr_size + fpr_size)
12134 rtx reg_save_area
12135 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12136 gcc_assert (MEM_P (reg_save_area));
12137 reg_save_area = XEXP (reg_save_area, 0);
12138 if (GET_CODE (reg_save_area) == PLUS)
12140 gcc_assert (XEXP (reg_save_area, 0)
12141 == virtual_stack_vars_rtx);
12142 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12143 offset += INTVAL (XEXP (reg_save_area, 1));
12145 else
12146 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12149 cfun->machine->varargs_save_offset = offset;
12150 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12153 else
12155 first_reg_offset = next_cum.words;
12156 save_area = crtl->args.internal_arg_pointer;
12158 if (targetm.calls.must_pass_in_stack (mode, type))
12159 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12162 set = get_varargs_alias_set ();
12163 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12164 && cfun->va_list_gpr_size)
12166 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12168 if (va_list_gpr_counter_field)
12169 /* V4 va_list_gpr_size counts number of registers needed. */
12170 n_gpr = cfun->va_list_gpr_size;
12171 else
12172 /* char * va_list instead counts number of bytes needed. */
12173 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12175 if (nregs > n_gpr)
12176 nregs = n_gpr;
12178 mem = gen_rtx_MEM (BLKmode,
12179 plus_constant (Pmode, save_area,
12180 first_reg_offset * reg_size));
12181 MEM_NOTRAP_P (mem) = 1;
12182 set_mem_alias_set (mem, set);
12183 set_mem_align (mem, BITS_PER_WORD);
12185 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12186 nregs);
12189 /* Save FP registers if needed. */
12190 if (DEFAULT_ABI == ABI_V4
12191 && TARGET_HARD_FLOAT
12192 && ! no_rtl
12193 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12194 && cfun->va_list_fpr_size)
12196 int fregno = next_cum.fregno, nregs;
12197 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12198 rtx lab = gen_label_rtx ();
12199 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12200 * UNITS_PER_FP_WORD);
12202 emit_jump_insn
12203 (gen_rtx_SET (pc_rtx,
12204 gen_rtx_IF_THEN_ELSE (VOIDmode,
12205 gen_rtx_NE (VOIDmode, cr1,
12206 const0_rtx),
12207 gen_rtx_LABEL_REF (VOIDmode, lab),
12208 pc_rtx)));
12210 for (nregs = 0;
12211 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12212 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12214 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12215 plus_constant (Pmode, save_area, off));
12216 MEM_NOTRAP_P (mem) = 1;
12217 set_mem_alias_set (mem, set);
12218 set_mem_align (mem, GET_MODE_ALIGNMENT (
12219 TARGET_HARD_FLOAT ? DFmode : SFmode));
12220 emit_move_insn (mem, gen_rtx_REG (
12221 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12224 emit_label (lab);
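/* For reference (illustrative, 32-bit V.4 hard-float): the register
   save area set up above is laid out as

       reg_save_area +  0 .. +31   r3..r10  (8 words of 4 bytes)
       reg_save_area + 32 .. +95   f1..f8   (8 doublewords)

   which matches the sav_ofs/sav_scale values used by
   rs6000_gimplify_va_arg below.  */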
12228 /* Create the va_list data type. */
12230 static tree
12231 rs6000_build_builtin_va_list (void)
12233 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12235 /* For AIX, prefer 'char *' because that's what the system
12236 header files like. */
12237 if (DEFAULT_ABI != ABI_V4)
12238 return build_pointer_type (char_type_node);
12240 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12241 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12242 get_identifier ("__va_list_tag"), record);
12244 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12245 unsigned_char_type_node);
12246 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12247 unsigned_char_type_node);
12248 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12249 every user file. */
12250 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12251 get_identifier ("reserved"), short_unsigned_type_node);
12252 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12253 get_identifier ("overflow_arg_area"),
12254 ptr_type_node);
12255 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12256 get_identifier ("reg_save_area"),
12257 ptr_type_node);
12259 va_list_gpr_counter_field = f_gpr;
12260 va_list_fpr_counter_field = f_fpr;
12262 DECL_FIELD_CONTEXT (f_gpr) = record;
12263 DECL_FIELD_CONTEXT (f_fpr) = record;
12264 DECL_FIELD_CONTEXT (f_res) = record;
12265 DECL_FIELD_CONTEXT (f_ovf) = record;
12266 DECL_FIELD_CONTEXT (f_sav) = record;
12268 TYPE_STUB_DECL (record) = type_decl;
12269 TYPE_NAME (record) = type_decl;
12270 TYPE_FIELDS (record) = f_gpr;
12271 DECL_CHAIN (f_gpr) = f_fpr;
12272 DECL_CHAIN (f_fpr) = f_res;
12273 DECL_CHAIN (f_res) = f_ovf;
12274 DECL_CHAIN (f_ovf) = f_sav;
12276 layout_type (record);
12278 /* The correct type is an array type of one element. */
12279 return build_array_type (record, build_index_type (size_zero_node));
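/* The record built above corresponds to this C type, the familiar
   SVR4 PowerPC va_list (sketch for illustration; the typedef name is
   hypothetical):  */
#if 0
  struct __va_list_tag
    {
      unsigned char gpr;		/* next GPR to use, 0..8 */
      unsigned char fpr;		/* next FPR to use, 0..8 */
      unsigned short reserved;		/* padding, named for -Wpadded */
      void *overflow_arg_area;		/* next argument on the stack */
      void *reg_save_area;		/* saved r3..r10, then f1..f8 */
    };
  typedef struct __va_list_tag va_list_sketch[1];
#endif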
12282 /* Implement va_start. */
12284 static void
12285 rs6000_va_start (tree valist, rtx nextarg)
12287 HOST_WIDE_INT words, n_gpr, n_fpr;
12288 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12289 tree gpr, fpr, ovf, sav, t;
12291 /* Only SVR4 needs something special. */
12292 if (DEFAULT_ABI != ABI_V4)
12294 std_expand_builtin_va_start (valist, nextarg);
12295 return;
12298 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12299 f_fpr = DECL_CHAIN (f_gpr);
12300 f_res = DECL_CHAIN (f_fpr);
12301 f_ovf = DECL_CHAIN (f_res);
12302 f_sav = DECL_CHAIN (f_ovf);
12304 valist = build_simple_mem_ref (valist);
12305 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12306 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12307 f_fpr, NULL_TREE);
12308 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12309 f_ovf, NULL_TREE);
12310 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12311 f_sav, NULL_TREE);
12313 /* Count number of gp and fp argument registers used. */
12314 words = crtl->args.info.words;
12315 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12316 GP_ARG_NUM_REG);
12317 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12318 FP_ARG_NUM_REG);
12320 if (TARGET_DEBUG_ARG)
12321 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12322 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12323 words, n_gpr, n_fpr);
12325 if (cfun->va_list_gpr_size)
12327 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12328 build_int_cst (NULL_TREE, n_gpr));
12329 TREE_SIDE_EFFECTS (t) = 1;
12330 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12333 if (cfun->va_list_fpr_size)
12335 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12336 build_int_cst (NULL_TREE, n_fpr));
12337 TREE_SIDE_EFFECTS (t) = 1;
12338 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12340 #ifdef HAVE_AS_GNU_ATTRIBUTE
12341 if (call_ABI_of_interest (cfun->decl))
12342 rs6000_passes_float = true;
12343 #endif
12346 /* Find the overflow area. */
12347 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12348 if (words != 0)
12349 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12350 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12351 TREE_SIDE_EFFECTS (t) = 1;
12352 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12354 /* If there were no va_arg invocations, don't set up the register
12355 save area. */
12356 if (!cfun->va_list_gpr_size
12357 && !cfun->va_list_fpr_size
12358 && n_gpr < GP_ARG_NUM_REG
12359 && n_fpr < FP_ARG_V4_MAX_REG)
12360 return;
12362 /* Find the register save area. */
12363 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12364 if (cfun->machine->varargs_save_offset)
12365 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12366 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12367 TREE_SIDE_EFFECTS (t) = 1;
12368 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
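/* Illustrative summary: for "void f (int a, ...)" on 32-bit V.4,
   va_start leaves gpr == 1 (a consumed r3), fpr == 0,
   overflow_arg_area pointing at the incoming stack arguments, and
   reg_save_area pointing at the block spilled by
   setup_incoming_varargs above.  */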
12371 /* Implement va_arg. */
12373 static tree
12374 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12375 gimple_seq *post_p)
12377 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12378 tree gpr, fpr, ovf, sav, reg, t, u;
12379 int size, rsize, n_reg, sav_ofs, sav_scale;
12380 tree lab_false, lab_over, addr;
12381 int align;
12382 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12383 int regalign = 0;
12384 gimple *stmt;
12386 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12388 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12389 return build_va_arg_indirect_ref (t);
12392 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12393 earlier version of gcc, with the property that it always applied alignment
12394 adjustments to the va-args (even for zero-sized types). The cheapest way
12395 to deal with this is to replicate the effect of the part of
12396 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12397 of relevance.
12398 We don't need to check for pass-by-reference because of the test above.
12399 We can return a simplified answer, since we know there's no offset to add. */
12401 if (((TARGET_MACHO
12402 && rs6000_darwin64_abi)
12403 || DEFAULT_ABI == ABI_ELFv2
12404 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12405 && integer_zerop (TYPE_SIZE (type)))
12407 unsigned HOST_WIDE_INT align, boundary;
12408 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12409 align = PARM_BOUNDARY / BITS_PER_UNIT;
12410 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12411 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12412 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12413 boundary /= BITS_PER_UNIT;
12414 if (boundary > align)
12416 tree t;
12417 /* This updates arg ptr by the amount that would be necessary
12418 to align the zero-sized (but not zero-alignment) item. */
12419 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12420 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12421 gimplify_and_add (t, pre_p);
12423 t = fold_convert (sizetype, valist_tmp);
12424 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12425 fold_convert (TREE_TYPE (valist),
12426 fold_build2 (BIT_AND_EXPR, sizetype, t,
12427 size_int (-boundary))));
12428 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12429 gimplify_and_add (t, pre_p);
12431 /* Since it is zero-sized there's no increment for the item itself. */
12432 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12433 return build_va_arg_indirect_ref (valist_tmp);
12436 if (DEFAULT_ABI != ABI_V4)
12438 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12440 tree elem_type = TREE_TYPE (type);
12441 machine_mode elem_mode = TYPE_MODE (elem_type);
12442 int elem_size = GET_MODE_SIZE (elem_mode);
12444 if (elem_size < UNITS_PER_WORD)
12446 tree real_part, imag_part;
12447 gimple_seq post = NULL;
12449 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12450 &post);
12451 /* Copy the value into a temporary, lest the formal temporary
12452 be reused out from under us. */
12453 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12454 gimple_seq_add_seq (pre_p, post);
12456 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12457 post_p);
12459 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12463 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12466 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12467 f_fpr = DECL_CHAIN (f_gpr);
12468 f_res = DECL_CHAIN (f_fpr);
12469 f_ovf = DECL_CHAIN (f_res);
12470 f_sav = DECL_CHAIN (f_ovf);
12472 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12473 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12474 f_fpr, NULL_TREE);
12475 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12476 f_ovf, NULL_TREE);
12477 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12478 f_sav, NULL_TREE);
12480 size = int_size_in_bytes (type);
12481 rsize = (size + 3) / 4;
12482 int pad = 4 * rsize - size;
12483 align = 1;
12485 machine_mode mode = TYPE_MODE (type);
12486 if (abi_v4_pass_in_fpr (mode, false))
12488 /* FP args go in FP registers, if present. */
12489 reg = fpr;
12490 n_reg = (size + 7) / 8;
12491 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12492 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12493 if (mode != SFmode && mode != SDmode)
12494 align = 8;
12496 else
12498 /* Otherwise into GP registers. */
12499 reg = gpr;
12500 n_reg = rsize;
12501 sav_ofs = 0;
12502 sav_scale = 4;
12503 if (n_reg == 2)
12504 align = 8;
12507 /* Pull the value out of the saved registers.... */
12509 lab_over = NULL;
12510 addr = create_tmp_var (ptr_type_node, "addr");
12512 /* AltiVec vectors never go in registers when -mabi=altivec. */
12513 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12514 align = 16;
12515 else
12517 lab_false = create_artificial_label (input_location);
12518 lab_over = create_artificial_label (input_location);
12520 /* Long long is aligned in the registers, as is any other 2-gpr
12521 item such as complex int, due to a historical mistake. */
12522 u = reg;
12523 if (n_reg == 2 && reg == gpr)
12525 regalign = 1;
12526 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12527 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12528 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12529 unshare_expr (reg), u);
12531 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12532 reg number is 0 for f1, so we want to make it odd. */
12533 else if (reg == fpr && mode == TDmode)
12535 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12536 build_int_cst (TREE_TYPE (reg), 1));
12537 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12540 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12541 t = build2 (GE_EXPR, boolean_type_node, u, t);
12542 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12543 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12544 gimplify_and_add (t, pre_p);
12546 t = sav;
12547 if (sav_ofs)
12548 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12550 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12551 build_int_cst (TREE_TYPE (reg), n_reg));
12552 u = fold_convert (sizetype, u);
12553 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12554 t = fold_build_pointer_plus (t, u);
12556 /* _Decimal32 varargs are located in the second word of the 64-bit
12557 FP register for 32-bit binaries. */
12558 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12559 t = fold_build_pointer_plus_hwi (t, size);
12561 /* Args are passed right-aligned. */
12562 if (BYTES_BIG_ENDIAN)
12563 t = fold_build_pointer_plus_hwi (t, pad);
12565 gimplify_assign (addr, t, pre_p);
12567 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12569 stmt = gimple_build_label (lab_false);
12570 gimple_seq_add_stmt (pre_p, stmt);
12572 if ((n_reg == 2 && !regalign) || n_reg > 2)
12574 /* Ensure that we don't find any more args in regs.
12575 Alignment has already taken care of the special cases. */
12576 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12580 /* ... otherwise out of the overflow area. */
12582 /* Care for on-stack alignment if needed. */
12583 t = ovf;
12584 if (align != 1)
12586 t = fold_build_pointer_plus_hwi (t, align - 1);
12587 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12588 build_int_cst (TREE_TYPE (t), -align));
12591 /* Args are passed right-aligned. */
12592 if (BYTES_BIG_ENDIAN)
12593 t = fold_build_pointer_plus_hwi (t, pad);
12595 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12597 gimplify_assign (unshare_expr (addr), t, pre_p);
12599 t = fold_build_pointer_plus_hwi (t, size);
12600 gimplify_assign (unshare_expr (ovf), t, pre_p);
12602 if (lab_over)
12604 stmt = gimple_build_label (lab_over);
12605 gimple_seq_add_stmt (pre_p, stmt);
12608 if (STRICT_ALIGNMENT
12609 && (TYPE_ALIGN (type)
12610 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12612 /* The value (of type complex double, for example) may not be
12613 aligned in memory in the saved registers, so copy via a
12614 temporary. (This is the same code as used for SPARC.) */
12615 tree tmp = create_tmp_var (type, "va_arg_tmp");
12616 tree dest_addr = build_fold_addr_expr (tmp);
12618 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12619 3, dest_addr, addr, size_int (rsize * 4));
12620 TREE_ADDRESSABLE (tmp) = 1;
12622 gimplify_and_add (copy, pre_p);
12623 addr = dest_addr;
12626 addr = fold_convert (ptrtype, addr);
12627 return build_va_arg_indirect_ref (addr);
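/* Illustrative only: for a word-sized GPR-class type on 32-bit V.4,
   the gimple emitted above behaves roughly like this C, using the
   hypothetical __va_list_tag sketch shown earlier (8 is
   GP_ARG_NUM_REG, 4 is the word size).  */
#if 0
static void *
va_arg_word_sketch (struct __va_list_tag *ap)
{
  char *addr;
  if (ap->gpr < 8)
    addr = (char *) ap->reg_save_area + 4 * ap->gpr++;
  else
    {
      ap->gpr = 8;		/* no further arguments in registers */
      addr = (char *) ap->overflow_arg_area;
      ap->overflow_arg_area = addr + 4;
    }
  return addr;
}
#endif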
12630 /* Builtins. */
12632 static void
12633 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12635 tree t;
12636 unsigned classify = rs6000_builtin_info[(int)code].attr;
12637 const char *attr_string = "";
12639 gcc_assert (name != NULL);
12640 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12642 if (rs6000_builtin_decls[(int)code])
12643 fatal_error (input_location,
12644 "internal error: builtin function %qs already processed",
12645 name);
12647 rs6000_builtin_decls[(int)code] = t =
12648 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12650 /* Set any special attributes. */
12651 if ((classify & RS6000_BTC_CONST) != 0)
12653 /* const function, function only depends on the inputs. */
12654 TREE_READONLY (t) = 1;
12655 TREE_NOTHROW (t) = 1;
12656 attr_string = ", const";
12658 else if ((classify & RS6000_BTC_PURE) != 0)
12660 /* pure function, function can read global memory, but does not set any
12661 external state. */
12662 DECL_PURE_P (t) = 1;
12663 TREE_NOTHROW (t) = 1;
12664 attr_string = ", pure";
12666 else if ((classify & RS6000_BTC_FP) != 0)
12668 /* Function is a math function. If rounding mode is on, then treat the
12669 function as not reading global memory, but it can have arbitrary side
12670 effects. If it is off, then assume the function is a const function.
12671 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12672 builtin-attribute.def that is used for the math functions. */
12673 TREE_NOTHROW (t) = 1;
12674 if (flag_rounding_math)
12676 DECL_PURE_P (t) = 1;
12677 DECL_IS_NOVOPS (t) = 1;
12678 attr_string = ", fp, pure";
12680 else
12682 TREE_READONLY (t) = 1;
12683 attr_string = ", fp, const";
12686 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12687 gcc_unreachable ();
12689 if (TARGET_DEBUG_BUILTIN)
12690 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12691 (int)code, name, attr_string);
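/* Illustrative use (the real registrations are driven from
   rs6000-builtin.def below): installing one AltiVec builtin, where
   FNTYPE would have been built beforehand by the type machinery
   elsewhere in this file.  */
#if 0
  def_builtin ("__builtin_altivec_vaddubm", fntype,
	       ALTIVEC_BUILTIN_VADDUBM);
#endif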
12694 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12696 #undef RS6000_BUILTIN_0
12697 #undef RS6000_BUILTIN_1
12698 #undef RS6000_BUILTIN_2
12699 #undef RS6000_BUILTIN_3
12700 #undef RS6000_BUILTIN_A
12701 #undef RS6000_BUILTIN_D
12702 #undef RS6000_BUILTIN_H
12703 #undef RS6000_BUILTIN_P
12704 #undef RS6000_BUILTIN_X
12706 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12707 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12708 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12709 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12710 { MASK, ICODE, NAME, ENUM },
12712 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12713 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12714 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12715 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12716 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12718 static const struct builtin_description bdesc_3arg[] =
12720 #include "rs6000-builtin.def"
12723 /* DST operations: void foo (void *, const int, const char). */
12725 #undef RS6000_BUILTIN_0
12726 #undef RS6000_BUILTIN_1
12727 #undef RS6000_BUILTIN_2
12728 #undef RS6000_BUILTIN_3
12729 #undef RS6000_BUILTIN_A
12730 #undef RS6000_BUILTIN_D
12731 #undef RS6000_BUILTIN_H
12732 #undef RS6000_BUILTIN_P
12733 #undef RS6000_BUILTIN_X
12735 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12736 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12737 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12738 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12739 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12740 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12741 { MASK, ICODE, NAME, ENUM },
12743 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12744 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12745 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12747 static const struct builtin_description bdesc_dst[] =
12749 #include "rs6000-builtin.def"
12752 /* Simple binary operations: VECc = foo (VECa, VECb). */
12754 #undef RS6000_BUILTIN_0
12755 #undef RS6000_BUILTIN_1
12756 #undef RS6000_BUILTIN_2
12757 #undef RS6000_BUILTIN_3
12758 #undef RS6000_BUILTIN_A
12759 #undef RS6000_BUILTIN_D
12760 #undef RS6000_BUILTIN_H
12761 #undef RS6000_BUILTIN_P
12762 #undef RS6000_BUILTIN_X
12764 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12765 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12766 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12767 { MASK, ICODE, NAME, ENUM },
12769 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12770 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12771 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12772 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12773 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12774 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12776 static const struct builtin_description bdesc_2arg[] =
12778 #include "rs6000-builtin.def"
12781 #undef RS6000_BUILTIN_0
12782 #undef RS6000_BUILTIN_1
12783 #undef RS6000_BUILTIN_2
12784 #undef RS6000_BUILTIN_3
12785 #undef RS6000_BUILTIN_A
12786 #undef RS6000_BUILTIN_D
12787 #undef RS6000_BUILTIN_H
12788 #undef RS6000_BUILTIN_P
12789 #undef RS6000_BUILTIN_X
12791 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12792 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12793 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12794 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12795 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12796 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12797 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12798 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12799 { MASK, ICODE, NAME, ENUM },
12801 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12803 /* AltiVec predicates. */
12805 static const struct builtin_description bdesc_altivec_preds[] =
12807 #include "rs6000-builtin.def"
12810 /* ABS* operations. */
12812 #undef RS6000_BUILTIN_0
12813 #undef RS6000_BUILTIN_1
12814 #undef RS6000_BUILTIN_2
12815 #undef RS6000_BUILTIN_3
12816 #undef RS6000_BUILTIN_A
12817 #undef RS6000_BUILTIN_D
12818 #undef RS6000_BUILTIN_H
12819 #undef RS6000_BUILTIN_P
12820 #undef RS6000_BUILTIN_X
12822 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12823 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12824 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12825 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12826 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12827 { MASK, ICODE, NAME, ENUM },
12829 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12830 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12831 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12832 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12834 static const struct builtin_description bdesc_abs[] =
12836 #include "rs6000-builtin.def"
12839 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12840 foo (VECa). */
12842 #undef RS6000_BUILTIN_0
12843 #undef RS6000_BUILTIN_1
12844 #undef RS6000_BUILTIN_2
12845 #undef RS6000_BUILTIN_3
12846 #undef RS6000_BUILTIN_A
12847 #undef RS6000_BUILTIN_D
12848 #undef RS6000_BUILTIN_H
12849 #undef RS6000_BUILTIN_P
12850 #undef RS6000_BUILTIN_X
12852 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12853 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12854 { MASK, ICODE, NAME, ENUM },
12856 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12857 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12858 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12859 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12860 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12861 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12862 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12864 static const struct builtin_description bdesc_1arg[] =
12866 #include "rs6000-builtin.def"
12869 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
12871 #undef RS6000_BUILTIN_0
12872 #undef RS6000_BUILTIN_1
12873 #undef RS6000_BUILTIN_2
12874 #undef RS6000_BUILTIN_3
12875 #undef RS6000_BUILTIN_A
12876 #undef RS6000_BUILTIN_D
12877 #undef RS6000_BUILTIN_H
12878 #undef RS6000_BUILTIN_P
12879 #undef RS6000_BUILTIN_X
12881 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12882 { MASK, ICODE, NAME, ENUM },
12884 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12885 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12886 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12887 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12888 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12889 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12890 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12891 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12893 static const struct builtin_description bdesc_0arg[] =
12895 #include "rs6000-builtin.def"
12898 /* HTM builtins. */
12899 #undef RS6000_BUILTIN_0
12900 #undef RS6000_BUILTIN_1
12901 #undef RS6000_BUILTIN_2
12902 #undef RS6000_BUILTIN_3
12903 #undef RS6000_BUILTIN_A
12904 #undef RS6000_BUILTIN_D
12905 #undef RS6000_BUILTIN_H
12906 #undef RS6000_BUILTIN_P
12907 #undef RS6000_BUILTIN_X
12909 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12910 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12911 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12912 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12913 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12914 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12915 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12916 { MASK, ICODE, NAME, ENUM },
12918 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12919 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12921 static const struct builtin_description bdesc_htm[] =
12923 #include "rs6000-builtin.def"
12926 #undef RS6000_BUILTIN_0
12927 #undef RS6000_BUILTIN_1
12928 #undef RS6000_BUILTIN_2
12929 #undef RS6000_BUILTIN_3
12930 #undef RS6000_BUILTIN_A
12931 #undef RS6000_BUILTIN_D
12932 #undef RS6000_BUILTIN_H
12933 #undef RS6000_BUILTIN_P
12935 /* Return true if a builtin function is overloaded. */
12936 bool
12937 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
12939 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
/* Return the internal name of the overloaded builtin function FNCODE.  */
12942 const char *
12943 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
12945 return rs6000_builtin_info[(int)fncode].name;
12948 /* Expand an expression EXP that calls a builtin without arguments. */
12949 static rtx
12950 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
12952 rtx pat;
12953 machine_mode tmode = insn_data[icode].operand[0].mode;
12955 if (icode == CODE_FOR_nothing)
12956 /* Builtin not supported on this processor. */
12957 return 0;
12959 if (icode == CODE_FOR_rs6000_mffsl
12960 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
12962 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
12963 return const0_rtx;
12966 if (target == 0
12967 || GET_MODE (target) != tmode
12968 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12969 target = gen_reg_rtx (tmode);
12971 pat = GEN_FCN (icode) (target);
12972 if (! pat)
12973 return 0;
12974 emit_insn (pat);
12976 return target;
/* Expand the mtfsf builtin, which sets FPSCR fields under an 8-bit field
   mask from a floating-point source operand.  */
12980 static rtx
12981 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
12983 rtx pat;
12984 tree arg0 = CALL_EXPR_ARG (exp, 0);
12985 tree arg1 = CALL_EXPR_ARG (exp, 1);
12986 rtx op0 = expand_normal (arg0);
12987 rtx op1 = expand_normal (arg1);
12988 machine_mode mode0 = insn_data[icode].operand[0].mode;
12989 machine_mode mode1 = insn_data[icode].operand[1].mode;
12991 if (icode == CODE_FOR_nothing)
12992 /* Builtin not supported on this processor. */
12993 return 0;
12995 /* If we got invalid arguments bail out before generating bad rtl. */
12996 if (arg0 == error_mark_node || arg1 == error_mark_node)
12997 return const0_rtx;
12999 if (!CONST_INT_P (op0)
13000 || INTVAL (op0) > 255
13001 || INTVAL (op0) < 0)
13003 error ("argument 1 must be an 8-bit field value");
13004 return const0_rtx;
13007 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13008 op0 = copy_to_mode_reg (mode0, op0);
13010 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13011 op1 = copy_to_mode_reg (mode1, op1);
13013 pat = GEN_FCN (icode) (op0, op1);
13014 if (!pat)
13015 return const0_rtx;
13016 emit_insn (pat);
13018 return NULL_RTX;
/* Expand the mtfsb0/mtfsb1 builtins, which clear or set a single bit of
   the FPSCR.  */
13021 static rtx
13022 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13024 rtx pat;
13025 tree arg0 = CALL_EXPR_ARG (exp, 0);
13026 rtx op0 = expand_normal (arg0);
13028 if (icode == CODE_FOR_nothing)
13029 /* Builtin not supported on this processor. */
13030 return 0;
13032 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13034 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13035 "%<-msoft-float%>");
13036 return const0_rtx;
13039 /* If we got invalid arguments bail out before generating bad rtl. */
13040 if (arg0 == error_mark_node)
13041 return const0_rtx;
13043 /* Only allow bit numbers 0 to 31. */
13044 if (!u5bit_cint_operand (op0, VOIDmode))
13046 error ("Argument must be a constant between 0 and 31.");
13047 return const0_rtx;
13050 pat = GEN_FCN (icode) (op0);
13051 if (!pat)
13052 return const0_rtx;
13053 emit_insn (pat);
13055 return NULL_RTX;
/* Expand the __builtin_set_fpscr_rn builtin, which sets the FPSCR
   binary rounding-mode (RN) bits.  */
13058 static rtx
13059 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13061 rtx pat;
13062 tree arg0 = CALL_EXPR_ARG (exp, 0);
13063 rtx op0 = expand_normal (arg0);
13064 machine_mode mode0 = insn_data[icode].operand[0].mode;
13066 if (icode == CODE_FOR_nothing)
13067 /* Builtin not supported on this processor. */
13068 return 0;
13070 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13072 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13073 return const0_rtx;
13076 /* If we got invalid arguments bail out before generating bad rtl. */
13077 if (arg0 == error_mark_node)
13078 return const0_rtx;
13080 /* If the argument is a constant, check the range.  Argument can only be a
13081 2-bit value.  Unfortunately, we can't check the range of the value at
13082 compile time if the argument is a variable.  The least significant two
13083 bits of the argument, regardless of type, are used to set the rounding
13084 mode.  All other bits are ignored.  */
13085 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13087 error ("Argument must be a value between 0 and 3.");
13088 return const0_rtx;
13091 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13092 op0 = copy_to_mode_reg (mode0, op0);
13094 pat = GEN_FCN (icode) (op0);
13095 if (!pat)
13096 return const0_rtx;
13097 emit_insn (pat);
13099 return NULL_RTX;
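/* For reference, the RN encoding this sets (from the PowerPC FPSCR
   definition, not specific to this file):

     __builtin_set_fpscr_rn (0);   round to nearest, ties to even
     __builtin_set_fpscr_rn (1);   round toward zero
     __builtin_set_fpscr_rn (2);   round toward +infinity
     __builtin_set_fpscr_rn (3);   round toward -infinity  */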
/* Expand the __builtin_set_fpscr_drn builtin, which sets the FPSCR
   decimal rounding-mode (DRN) bits; 64-bit only.  */
13101 static rtx
13102 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13104 rtx pat;
13105 tree arg0 = CALL_EXPR_ARG (exp, 0);
13106 rtx op0 = expand_normal (arg0);
13107 machine_mode mode0 = insn_data[icode].operand[0].mode;
13109 if (TARGET_32BIT)
13110 /* Builtin not supported in 32-bit mode. */
13111 fatal_error (input_location,
13112 "%<__builtin_set_fpscr_drn%> is not supported "
13113 "in 32-bit mode");
13115 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13117 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13118 return const0_rtx;
13121 if (icode == CODE_FOR_nothing)
13122 /* Builtin not supported on this processor. */
13123 return 0;
13125 /* If we got invalid arguments bail out before generating bad rtl. */
13126 if (arg0 == error_mark_node)
13127 return const0_rtx;
13129 /* If the argument is a constant, check the range.  Argument can only be a
13130 3-bit value.  Unfortunately, we can't check the range of the value at
13131 compile time if the argument is a variable.  The least significant three
13132 bits of the argument, regardless of type, are used to set the decimal
13133 rounding mode.  All other bits are ignored.  */
13134 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13136 error ("Argument must be a value between 0 and 7.");
13137 return const0_rtx;
13140 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13141 op0 = copy_to_mode_reg (mode0, op0);
13143 pat = GEN_FCN (icode) (op0);
13144 if (! pat)
13145 return const0_rtx;
13146 emit_insn (pat);
13148 return NULL_RTX;
/* Expand a builtin that takes one operand and returns a result.  */
13151 static rtx
13152 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13154 rtx pat;
13155 tree arg0 = CALL_EXPR_ARG (exp, 0);
13156 rtx op0 = expand_normal (arg0);
13157 machine_mode tmode = insn_data[icode].operand[0].mode;
13158 machine_mode mode0 = insn_data[icode].operand[1].mode;
13160 if (icode == CODE_FOR_nothing)
13161 /* Builtin not supported on this processor. */
13162 return 0;
13164 /* If we got invalid arguments bail out before generating bad rtl. */
13165 if (arg0 == error_mark_node)
13166 return const0_rtx;
13168 if (icode == CODE_FOR_altivec_vspltisb
13169 || icode == CODE_FOR_altivec_vspltish
13170 || icode == CODE_FOR_altivec_vspltisw)
13172 /* Only allow 5-bit *signed* literals. */
13173 if (!CONST_INT_P (op0)
13174 || INTVAL (op0) > 15
13175 || INTVAL (op0) < -16)
13177 error ("argument 1 must be a 5-bit signed literal");
13178 return CONST0_RTX (tmode);
13182 if (target == 0
13183 || GET_MODE (target) != tmode
13184 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13185 target = gen_reg_rtx (tmode);
13187 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13188 op0 = copy_to_mode_reg (mode0, op0);
13190 pat = GEN_FCN (icode) (target, op0);
13191 if (! pat)
13192 return 0;
13193 emit_insn (pat);
13195 return target;
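/* A usage sketch of the 5-bit literal check above: the vspltis* builtins
   require a compile-time constant in [-16, 15], so

     vector signed char v = __builtin_altivec_vspltisb (5);

   is accepted, while an argument of 20, or any non-constant argument,
   reaches the error above.  */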
/* Expand an AltiVec ABS* builtin; the insn needs two scratch registers
   in addition to the target and source operands.  */
13198 static rtx
13199 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13201 rtx pat, scratch1, scratch2;
13202 tree arg0 = CALL_EXPR_ARG (exp, 0);
13203 rtx op0 = expand_normal (arg0);
13204 machine_mode tmode = insn_data[icode].operand[0].mode;
13205 machine_mode mode0 = insn_data[icode].operand[1].mode;
13207 /* If we have invalid arguments, bail out before generating bad rtl. */
13208 if (arg0 == error_mark_node)
13209 return const0_rtx;
13211 if (target == 0
13212 || GET_MODE (target) != tmode
13213 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13214 target = gen_reg_rtx (tmode);
13216 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13217 op0 = copy_to_mode_reg (mode0, op0);
13219 scratch1 = gen_reg_rtx (mode0);
13220 scratch2 = gen_reg_rtx (mode0);
13222 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13223 if (! pat)
13224 return 0;
13225 emit_insn (pat);
13227 return target;
/* Expand a builtin that takes two operands and returns a result.  */
13230 static rtx
13231 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13233 rtx pat;
13234 tree arg0 = CALL_EXPR_ARG (exp, 0);
13235 tree arg1 = CALL_EXPR_ARG (exp, 1);
13236 rtx op0 = expand_normal (arg0);
13237 rtx op1 = expand_normal (arg1);
13238 machine_mode tmode = insn_data[icode].operand[0].mode;
13239 machine_mode mode0 = insn_data[icode].operand[1].mode;
13240 machine_mode mode1 = insn_data[icode].operand[2].mode;
13242 if (icode == CODE_FOR_nothing)
13243 /* Builtin not supported on this processor. */
13244 return 0;
13246 /* If we got invalid arguments bail out before generating bad rtl. */
13247 if (arg0 == error_mark_node || arg1 == error_mark_node)
13248 return const0_rtx;
13250 if (icode == CODE_FOR_unpackv1ti
13251 || icode == CODE_FOR_unpackkf
13252 || icode == CODE_FOR_unpacktf
13253 || icode == CODE_FOR_unpackif
13254 || icode == CODE_FOR_unpacktd)
13256 /* Only allow 1-bit unsigned literals. */
13257 STRIP_NOPS (arg1);
13258 if (TREE_CODE (arg1) != INTEGER_CST
13259 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13261 error ("argument 2 must be a 1-bit unsigned literal");
13262 return CONST0_RTX (tmode);
13265 else if (icode == CODE_FOR_altivec_vspltw)
13267 /* Only allow 2-bit unsigned literals. */
13268 STRIP_NOPS (arg1);
13269 if (TREE_CODE (arg1) != INTEGER_CST
13270 || TREE_INT_CST_LOW (arg1) & ~3)
13272 error ("argument 2 must be a 2-bit unsigned literal");
13273 return CONST0_RTX (tmode);
13276 else if (icode == CODE_FOR_altivec_vsplth)
13278 /* Only allow 3-bit unsigned literals. */
13279 STRIP_NOPS (arg1);
13280 if (TREE_CODE (arg1) != INTEGER_CST
13281 || TREE_INT_CST_LOW (arg1) & ~7)
13283 error ("argument 2 must be a 3-bit unsigned literal");
13284 return CONST0_RTX (tmode);
13287 else if (icode == CODE_FOR_altivec_vspltb)
13289 /* Only allow 4-bit unsigned literals. */
13290 STRIP_NOPS (arg1);
13291 if (TREE_CODE (arg1) != INTEGER_CST
13292 || TREE_INT_CST_LOW (arg1) & ~15)
13294 error ("argument 2 must be a 4-bit unsigned literal");
13295 return CONST0_RTX (tmode);
13298 else if (icode == CODE_FOR_altivec_vcfux
13299 || icode == CODE_FOR_altivec_vcfsx
13300 || icode == CODE_FOR_altivec_vctsxs
13301 || icode == CODE_FOR_altivec_vctuxs)
13303 /* Only allow 5-bit unsigned literals. */
13304 STRIP_NOPS (arg1);
13305 if (TREE_CODE (arg1) != INTEGER_CST
13306 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13308 error ("argument 2 must be a 5-bit unsigned literal");
13309 return CONST0_RTX (tmode);
13312 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13313 || icode == CODE_FOR_dfptstsfi_lt_dd
13314 || icode == CODE_FOR_dfptstsfi_gt_dd
13315 || icode == CODE_FOR_dfptstsfi_unordered_dd
13316 || icode == CODE_FOR_dfptstsfi_eq_td
13317 || icode == CODE_FOR_dfptstsfi_lt_td
13318 || icode == CODE_FOR_dfptstsfi_gt_td
13319 || icode == CODE_FOR_dfptstsfi_unordered_td)
13321 /* Only allow 6-bit unsigned literals. */
13322 STRIP_NOPS (arg0);
13323 if (TREE_CODE (arg0) != INTEGER_CST
13324 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13326 error ("argument 1 must be a 6-bit unsigned literal");
13327 return CONST0_RTX (tmode);
13330 else if (icode == CODE_FOR_xststdcqp_kf
13331 || icode == CODE_FOR_xststdcqp_tf
13332 || icode == CODE_FOR_xststdcdp
13333 || icode == CODE_FOR_xststdcsp
13334 || icode == CODE_FOR_xvtstdcdp
13335 || icode == CODE_FOR_xvtstdcsp)
13337 /* Only allow 7-bit unsigned literals. */
13338 STRIP_NOPS (arg1);
13339 if (TREE_CODE (arg1) != INTEGER_CST
13340 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13342 error ("argument 2 must be a 7-bit unsigned literal");
13343 return CONST0_RTX (tmode);
13347 if (target == 0
13348 || GET_MODE (target) != tmode
13349 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13350 target = gen_reg_rtx (tmode);
13352 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13353 op0 = copy_to_mode_reg (mode0, op0);
13354 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13355 op1 = copy_to_mode_reg (mode1, op1);
13357 pat = GEN_FCN (icode) (target, op0, op1);
13358 if (! pat)
13359 return 0;
13360 emit_insn (pat);
13362 return target;
/* Expand an AltiVec predicate builtin, which compares two vectors and
   then tests the resulting CR6 bits.  */
13365 static rtx
13366 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13368 rtx pat, scratch;
13369 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13370 tree arg0 = CALL_EXPR_ARG (exp, 1);
13371 tree arg1 = CALL_EXPR_ARG (exp, 2);
13372 rtx op0 = expand_normal (arg0);
13373 rtx op1 = expand_normal (arg1);
13374 machine_mode tmode = SImode;
13375 machine_mode mode0 = insn_data[icode].operand[1].mode;
13376 machine_mode mode1 = insn_data[icode].operand[2].mode;
13377 int cr6_form_int;
13379 if (TREE_CODE (cr6_form) != INTEGER_CST)
13381 error ("argument 1 of %qs must be a constant",
13382 "__builtin_altivec_predicate");
13383 return const0_rtx;
13385 else
13386 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13388 gcc_assert (mode0 == mode1);
13390 /* If we have invalid arguments, bail out before generating bad rtl. */
13391 if (arg0 == error_mark_node || arg1 == error_mark_node)
13392 return const0_rtx;
13394 if (target == 0
13395 || GET_MODE (target) != tmode
13396 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13397 target = gen_reg_rtx (tmode);
13399 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13400 op0 = copy_to_mode_reg (mode0, op0);
13401 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13402 op1 = copy_to_mode_reg (mode1, op1);
13404 /* Note that for many of the relevant operations (e.g. cmpne or
13405 cmpeq) with float or double operands, it would make more sense for
13406 the mode of the allocated scratch register to be a vector integer
13407 mode.  But the choice to copy the mode of operand 0 was made long
13408 ago and there are no plans to change it.  */
13409 scratch = gen_reg_rtx (mode0);
13411 pat = GEN_FCN (icode) (scratch, op0, op1);
13412 if (! pat)
13413 return 0;
13414 emit_insn (pat);
13416 /* The vec_any* and vec_all* predicates use the same opcodes for two
13417 different operations, but the bits in CR6 will be different
13418 depending on what information we want. So we have to play tricks
13419 with CR6 to get the right bits out.
13421 If you think this is disgusting, look at the specs for the
13422 AltiVec predicates. */
13424 switch (cr6_form_int)
13426 case 0:
13427 emit_insn (gen_cr6_test_for_zero (target));
13428 break;
13429 case 1:
13430 emit_insn (gen_cr6_test_for_zero_reverse (target));
13431 break;
13432 case 2:
13433 emit_insn (gen_cr6_test_for_lt (target));
13434 break;
13435 case 3:
13436 emit_insn (gen_cr6_test_for_lt_reverse (target));
13437 break;
13438 default:
13439 error ("argument 1 of %qs is out of range",
13440 "__builtin_altivec_predicate");
13441 break;
13444 return target;
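/* A usage sketch of the CR6 forms (the cr6_form values follow the
   altivec.h conventions): vec_all_eq (a, b) and vec_any_ne (a, b) both
   expand to the same vcmpequ*. comparison; the former passes cr6_form 2
   to test the "all elements compared true" bit of CR6, while the latter
   passes 3 to test its inverse, so only the CR6 test emitted after the
   compare differs.  */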
/* Return a V16QImode permute selector, as a register, that reverses the
   bytes within each element of a vector of MODE.  */
13447 rtx
13448 swap_endian_selector_for_mode (machine_mode mode)
13450 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13451 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13452 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13453 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13455 unsigned int *swaparray, i;
13456 rtx perm[16];
13458 switch (mode)
13460 case E_V1TImode:
13461 swaparray = swap1;
13462 break;
13463 case E_V2DFmode:
13464 case E_V2DImode:
13465 swaparray = swap2;
13466 break;
13467 case E_V4SFmode:
13468 case E_V4SImode:
13469 swaparray = swap4;
13470 break;
13471 case E_V8HImode:
13472 swaparray = swap8;
13473 break;
13474 default:
13475 gcc_unreachable ();
13478 for (i = 0; i < 16; ++i)
13479 perm[i] = GEN_INT (swaparray[i]);
13481 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13482 gen_rtvec_v (16, perm)));
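/* For example, the V4SImode selector built above is {3,2,1,0, 7,6,5,4,
   11,10,9,8, 15,14,13,12}; a vperm using it reverses the byte order
   within each 32-bit element of the vector, which is how the callers
   (for instance the little-endian lxvd2x/stxvd2x paths, the expected
   usage) realize an endianness swap.  */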
/* Expand an AltiVec/VSX load builtin (lvx, lvxl, lve*x and friends).  */
13485 static rtx
13486 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13488 rtx pat, addr;
13489 tree arg0 = CALL_EXPR_ARG (exp, 0);
13490 tree arg1 = CALL_EXPR_ARG (exp, 1);
13491 machine_mode tmode = insn_data[icode].operand[0].mode;
13492 machine_mode mode0 = Pmode;
13493 machine_mode mode1 = Pmode;
13494 rtx op0 = expand_normal (arg0);
13495 rtx op1 = expand_normal (arg1);
13497 if (icode == CODE_FOR_nothing)
13498 /* Builtin not supported on this processor. */
13499 return 0;
13501 /* If we got invalid arguments bail out before generating bad rtl. */
13502 if (arg0 == error_mark_node || arg1 == error_mark_node)
13503 return const0_rtx;
13505 if (target == 0
13506 || GET_MODE (target) != tmode
13507 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13508 target = gen_reg_rtx (tmode);
13510 op1 = copy_to_mode_reg (mode1, op1);
13512 /* For LVX, express the RTL accurately by ANDing the address with -16.
13513 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13514 so the raw address is fine. */
13515 if (icode == CODE_FOR_altivec_lvx_v1ti
13516 || icode == CODE_FOR_altivec_lvx_v2df
13517 || icode == CODE_FOR_altivec_lvx_v2di
13518 || icode == CODE_FOR_altivec_lvx_v4sf
13519 || icode == CODE_FOR_altivec_lvx_v4si
13520 || icode == CODE_FOR_altivec_lvx_v8hi
13521 || icode == CODE_FOR_altivec_lvx_v16qi)
13523 rtx rawaddr;
13524 if (op0 == const0_rtx)
13525 rawaddr = op1;
13526 else
13528 op0 = copy_to_mode_reg (mode0, op0);
13529 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13531 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13532 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13534 emit_insn (gen_rtx_SET (target, addr));
13536 else
13538 if (op0 == const0_rtx)
13539 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13540 else
13542 op0 = copy_to_mode_reg (mode0, op0);
13543 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13544 gen_rtx_PLUS (Pmode, op1, op0));
13547 pat = GEN_FCN (icode) (target, addr);
13548 if (! pat)
13549 return 0;
13550 emit_insn (pat);
13553 return target;
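/* A usage sketch of the masking above (following the documented vec_ld
   semantics): for

     vector int v = vec_ld (off, ptr);

   the expansion computes (ptr + off) & -16, i.e. the effective address
   is truncated to a 16-byte boundary, exactly as the lvx hardware
   instruction behaves, so the RTL describes the real access.  */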
/* Expand a variable-length store builtin (stxvl, xst_len_r), which takes
   a vector, an address and a length.  */
13556 static rtx
13557 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13559 rtx pat;
13560 tree arg0 = CALL_EXPR_ARG (exp, 0);
13561 tree arg1 = CALL_EXPR_ARG (exp, 1);
13562 tree arg2 = CALL_EXPR_ARG (exp, 2);
13563 rtx op0 = expand_normal (arg0);
13564 rtx op1 = expand_normal (arg1);
13565 rtx op2 = expand_normal (arg2);
13566 machine_mode mode0 = insn_data[icode].operand[0].mode;
13567 machine_mode mode1 = insn_data[icode].operand[1].mode;
13568 machine_mode mode2 = insn_data[icode].operand[2].mode;
13570 if (icode == CODE_FOR_nothing)
13571 /* Builtin not supported on this processor. */
13572 return NULL_RTX;
13574 /* If we got invalid arguments bail out before generating bad rtl. */
13575 if (arg0 == error_mark_node
13576 || arg1 == error_mark_node
13577 || arg2 == error_mark_node)
13578 return NULL_RTX;
13580 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13581 op0 = copy_to_mode_reg (mode0, op0);
13582 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13583 op1 = copy_to_mode_reg (mode1, op1);
13584 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13585 op2 = copy_to_mode_reg (mode2, op2);
13587 pat = GEN_FCN (icode) (op0, op1, op2);
13588 if (pat)
13589 emit_insn (pat);
13591 return NULL_RTX;
/* Expand an AltiVec/VSX store builtin (stvx, stvxl, stve*x and friends).  */
13594 static rtx
13595 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13597 tree arg0 = CALL_EXPR_ARG (exp, 0);
13598 tree arg1 = CALL_EXPR_ARG (exp, 1);
13599 tree arg2 = CALL_EXPR_ARG (exp, 2);
13600 rtx op0 = expand_normal (arg0);
13601 rtx op1 = expand_normal (arg1);
13602 rtx op2 = expand_normal (arg2);
13603 rtx pat, addr, rawaddr;
13604 machine_mode tmode = insn_data[icode].operand[0].mode;
13605 machine_mode smode = insn_data[icode].operand[1].mode;
13606 machine_mode mode1 = Pmode;
13607 machine_mode mode2 = Pmode;
13609 /* If we got invalid arguments, bail out before generating bad rtl.  */
13610 if (arg0 == error_mark_node
13611 || arg1 == error_mark_node
13612 || arg2 == error_mark_node)
13613 return const0_rtx;
13615 op2 = copy_to_mode_reg (mode2, op2);
13617 /* For STVX, express the RTL accurately by ANDing the address with -16.
13618 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13619 so the raw address is fine. */
13620 if (icode == CODE_FOR_altivec_stvx_v2df
13621 || icode == CODE_FOR_altivec_stvx_v2di
13622 || icode == CODE_FOR_altivec_stvx_v4sf
13623 || icode == CODE_FOR_altivec_stvx_v4si
13624 || icode == CODE_FOR_altivec_stvx_v8hi
13625 || icode == CODE_FOR_altivec_stvx_v16qi)
13627 if (op1 == const0_rtx)
13628 rawaddr = op2;
13629 else
13631 op1 = copy_to_mode_reg (mode1, op1);
13632 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13635 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13636 addr = gen_rtx_MEM (tmode, addr);
13638 op0 = copy_to_mode_reg (tmode, op0);
13640 emit_insn (gen_rtx_SET (addr, op0));
13642 else
13644 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13645 op0 = copy_to_mode_reg (smode, op0);
13647 if (op1 == const0_rtx)
13648 addr = gen_rtx_MEM (tmode, op2);
13649 else
13651 op1 = copy_to_mode_reg (mode1, op1);
13652 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13655 pat = GEN_FCN (icode) (addr, op0);
13656 if (pat)
13657 emit_insn (pat);
13660 return NULL_RTX;
13663 /* Return the appropriate SPR number associated with the given builtin. */
13664 static inline HOST_WIDE_INT
13665 htm_spr_num (enum rs6000_builtins code)
13667 if (code == HTM_BUILTIN_GET_TFHAR
13668 || code == HTM_BUILTIN_SET_TFHAR)
13669 return TFHAR_SPR;
13670 else if (code == HTM_BUILTIN_GET_TFIAR
13671 || code == HTM_BUILTIN_SET_TFIAR)
13672 return TFIAR_SPR;
13673 else if (code == HTM_BUILTIN_GET_TEXASR
13674 || code == HTM_BUILTIN_SET_TEXASR)
13675 return TEXASR_SPR;
13676 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13677 || code == HTM_BUILTIN_SET_TEXASRU);
13678 return TEXASRU_SPR;
13681 /* Return the correct ICODE value depending on whether we are
13682 setting or reading the HTM SPRs. */
13683 static inline enum insn_code
13684 rs6000_htm_spr_icode (bool nonvoid)
13686 if (nonvoid)
13687 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13688 else
13689 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13692 /* Expand the HTM builtin in EXP and store the result in TARGET.
13693 Store true in *EXPANDEDP if we found a builtin to expand. */
13694 static rtx
13695 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13697 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13698 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13699 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13700 const struct builtin_description *d;
13701 size_t i;
13703 *expandedp = true;
13705 if (!TARGET_POWERPC64
13706 && (fcode == HTM_BUILTIN_TABORTDC
13707 || fcode == HTM_BUILTIN_TABORTDCI))
13709 size_t uns_fcode = (size_t)fcode;
13710 const char *name = rs6000_builtin_info[uns_fcode].name;
13711 error ("builtin %qs is only valid in 64-bit mode", name);
13712 return const0_rtx;
13715 /* Expand the HTM builtins. */
13716 d = bdesc_htm;
13717 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13718 if (d->code == fcode)
13720 rtx op[MAX_HTM_OPERANDS], pat;
13721 int nopnds = 0;
13722 tree arg;
13723 call_expr_arg_iterator iter;
13724 unsigned attr = rs6000_builtin_info[fcode].attr;
13725 enum insn_code icode = d->icode;
13726 const struct insn_operand_data *insn_op;
13727 bool uses_spr = (attr & RS6000_BTC_SPR);
13728 rtx cr = NULL_RTX;
13730 if (uses_spr)
13731 icode = rs6000_htm_spr_icode (nonvoid);
13732 insn_op = &insn_data[icode].operand[0];
13734 if (nonvoid)
13736 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13737 if (!target
13738 || GET_MODE (target) != tmode
13739 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13740 target = gen_reg_rtx (tmode);
13741 if (uses_spr)
13742 op[nopnds++] = target;
13745 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13747 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13748 return const0_rtx;
13750 insn_op = &insn_data[icode].operand[nopnds];
13752 op[nopnds] = expand_normal (arg);
13754 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13756 if (!strcmp (insn_op->constraint, "n"))
13758 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13759 if (!CONST_INT_P (op[nopnds]))
13760 error ("argument %d must be an unsigned literal", arg_num);
13761 else
13762 error ("argument %d is an unsigned literal that is "
13763 "out of range", arg_num);
13764 return const0_rtx;
13766 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13769 nopnds++;
13772 /* Handle the builtins for extended mnemonics. These accept
13773 no arguments, but map to builtins that take arguments. */
13774 switch (fcode)
13776 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13777 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13778 op[nopnds++] = GEN_INT (1);
13779 if (flag_checking)
13780 attr |= RS6000_BTC_UNARY;
13781 break;
13782 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13783 op[nopnds++] = GEN_INT (0);
13784 if (flag_checking)
13785 attr |= RS6000_BTC_UNARY;
13786 break;
13787 default:
13788 break;
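/* That is (per the aliases noted above): __builtin_tendall () is
   expanded exactly as __builtin_tend (1), __builtin_tresume () as
   __builtin_tsr (1), and __builtin_tsuspend () as __builtin_tsr (0),
   with the constant operand synthesized here.  */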
13791 /* If this builtin accesses SPRs, then pass in the appropriate
13792 SPR number and SPR regno as the last two operands. */
13793 if (uses_spr)
13795 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13796 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13798 /* If this builtin accesses a CR, then pass in a scratch
13799 CR as the last operand. */
13800 else if (attr & RS6000_BTC_CR)
13801 { cr = gen_reg_rtx (CCmode);
13802 op[nopnds++] = cr;
13805 if (flag_checking)
13807 int expected_nopnds = 0;
13808 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13809 expected_nopnds = 1;
13810 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13811 expected_nopnds = 2;
13812 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13813 expected_nopnds = 3;
13814 if (!(attr & RS6000_BTC_VOID))
13815 expected_nopnds += 1;
13816 if (uses_spr)
13817 expected_nopnds += 1;
13819 gcc_assert (nopnds == expected_nopnds
13820 && nopnds <= MAX_HTM_OPERANDS);
13823 switch (nopnds)
13825 case 1:
13826 pat = GEN_FCN (icode) (op[0]);
13827 break;
13828 case 2:
13829 pat = GEN_FCN (icode) (op[0], op[1]);
13830 break;
13831 case 3:
13832 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13833 break;
13834 case 4:
13835 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13836 break;
13837 default:
13838 gcc_unreachable ();
13840 if (!pat)
13841 return NULL_RTX;
13842 emit_insn (pat);
13844 if (attr & RS6000_BTC_CR)
13846 if (fcode == HTM_BUILTIN_TBEGIN)
13848 /* Emit code to set TARGET to true or false depending on
13849 whether the tbegin. instruction successfully or failed
13850 to start a transaction. We do this by placing the 1's
13851 complement of CR's EQ bit into TARGET. */
13852 rtx scratch = gen_reg_rtx (SImode);
13853 emit_insn (gen_rtx_SET (scratch,
13854 gen_rtx_EQ (SImode, cr,
13855 const0_rtx)));
13856 emit_insn (gen_rtx_SET (target,
13857 gen_rtx_XOR (SImode, scratch,
13858 GEN_INT (1))));
13860 else
13862 /* Emit code to copy the 4-bit condition register field
13863 CR into the least significant end of register TARGET. */
13864 rtx scratch1 = gen_reg_rtx (SImode);
13865 rtx scratch2 = gen_reg_rtx (SImode);
13866 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13867 emit_insn (gen_movcc (subreg, cr));
13868 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13869 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13873 if (nonvoid)
13874 return target;
13875 return const0_rtx;
13878 *expandedp = false;
13879 return NULL_RTX;
13882 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
13884 static rtx
13885 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
13886 rtx target)
13888 /* __builtin_cpu_init () is a nop, so expand to nothing. */
13889 if (fcode == RS6000_BUILTIN_CPU_INIT)
13890 return const0_rtx;
13892 if (target == 0 || GET_MODE (target) != SImode)
13893 target = gen_reg_rtx (SImode);
13895 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
13896 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
13897 /* The target_clones pass creates an ARRAY_REF instead of a STRING_CST;
13898 convert it back to a STRING_CST.  */
13899 if (TREE_CODE (arg) == ARRAY_REF
13900 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
13901 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
13902 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
13903 arg = TREE_OPERAND (arg, 0);
13905 if (TREE_CODE (arg) != STRING_CST)
13907 error ("builtin %qs only accepts a string argument",
13908 rs6000_builtin_info[(size_t) fcode].name);
13909 return const0_rtx;
13912 if (fcode == RS6000_BUILTIN_CPU_IS)
13914 const char *cpu = TREE_STRING_POINTER (arg);
13915 rtx cpuid = NULL_RTX;
13916 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
13917 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
13919 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
13920 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
13921 break;
13923 if (cpuid == NULL_RTX)
13925 /* Invalid CPU argument. */
13926 error ("cpu %qs is an invalid argument to builtin %qs",
13927 cpu, rs6000_builtin_info[(size_t) fcode].name);
13928 return const0_rtx;
13931 rtx platform = gen_reg_rtx (SImode);
13932 rtx tcbmem = gen_const_mem (SImode,
13933 gen_rtx_PLUS (Pmode,
13934 gen_rtx_REG (Pmode, TLS_REGNUM),
13935 GEN_INT (TCB_PLATFORM_OFFSET)));
13936 emit_move_insn (platform, tcbmem);
13937 emit_insn (gen_eqsi3 (target, platform, cpuid));
13939 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
13941 const char *hwcap = TREE_STRING_POINTER (arg);
13942 rtx mask = NULL_RTX;
13943 int hwcap_offset;
13944 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
13945 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
13947 mask = GEN_INT (cpu_supports_info[i].mask);
13948 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
13949 break;
13951 if (mask == NULL_RTX)
13953 /* Invalid HWCAP argument. */
13954 error ("%s %qs is an invalid argument to builtin %qs",
13955 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
13956 return const0_rtx;
13959 rtx tcb_hwcap = gen_reg_rtx (SImode);
13960 rtx tcbmem = gen_const_mem (SImode,
13961 gen_rtx_PLUS (Pmode,
13962 gen_rtx_REG (Pmode, TLS_REGNUM),
13963 GEN_INT (hwcap_offset)));
13964 emit_move_insn (tcb_hwcap, tcbmem);
13965 rtx scratch1 = gen_reg_rtx (SImode);
13966 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
13967 rtx scratch2 = gen_reg_rtx (SImode);
13968 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
13969 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
13971 else
13972 gcc_unreachable ();
13974 /* Record that we have expanded a CPU builtin, so that we can later
13975 emit a reference to the special symbol exported by LIBC to ensure we
13976 do not link against an old LIBC that doesn't support this feature. */
13977 cpu_builtin_p = true;
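/* The net effect (a sketch; "power9" is just an example of a valid CPU
   name):

     if (__builtin_cpu_is ("power9")) ...

   compiles to a load of the SImode platform word from the TCB at
   TCB_PLATFORM_OFFSET off the TLS register, plus a compare against the
   constant for "power9", with no function call at run time.  */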
13979 #else
13980 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
13981 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
13983 /* For old LIBCs, always return FALSE. */
13984 emit_move_insn (target, GEN_INT (0));
13985 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
13987 return target;
/* Expand a builtin that takes three operands and returns a result.  */
13990 static rtx
13991 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
13993 rtx pat;
13994 tree arg0 = CALL_EXPR_ARG (exp, 0);
13995 tree arg1 = CALL_EXPR_ARG (exp, 1);
13996 tree arg2 = CALL_EXPR_ARG (exp, 2);
13997 rtx op0 = expand_normal (arg0);
13998 rtx op1 = expand_normal (arg1);
13999 rtx op2 = expand_normal (arg2);
14000 machine_mode tmode = insn_data[icode].operand[0].mode;
14001 machine_mode mode0 = insn_data[icode].operand[1].mode;
14002 machine_mode mode1 = insn_data[icode].operand[2].mode;
14003 machine_mode mode2 = insn_data[icode].operand[3].mode;
14005 if (icode == CODE_FOR_nothing)
14006 /* Builtin not supported on this processor. */
14007 return 0;
14009 /* If we got invalid arguments bail out before generating bad rtl. */
14010 if (arg0 == error_mark_node
14011 || arg1 == error_mark_node
14012 || arg2 == error_mark_node)
14013 return const0_rtx;
14015 /* Check and prepare argument depending on the instruction code.
14017 Note that a switch statement instead of the sequence of tests
14018 would be incorrect as many of the CODE_FOR values could be
14019 CODE_FOR_nothing and that would yield multiple alternatives
14020 with identical values. We'd never reach here at runtime in
14021 this case. */
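/* Concretely (a sketch of the failure mode described above): written as

     switch (icode)
       {
       case CODE_FOR_altivec_vsldoi_v4sf:
       case CODE_FOR_vsx_xxpermdi_v2df:
         ...

   a target where both insns are unavailable would give both case labels
   the value CODE_FOR_nothing, and the switch would fail to compile with
   a duplicate-case-value error.  */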
14022 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14023 || icode == CODE_FOR_altivec_vsldoi_v2df
14024 || icode == CODE_FOR_altivec_vsldoi_v4si
14025 || icode == CODE_FOR_altivec_vsldoi_v8hi
14026 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14028 /* Only allow 4-bit unsigned literals. */
14029 STRIP_NOPS (arg2);
14030 if (TREE_CODE (arg2) != INTEGER_CST
14031 || TREE_INT_CST_LOW (arg2) & ~0xf)
14033 error ("argument 3 must be a 4-bit unsigned literal");
14034 return CONST0_RTX (tmode);
14037 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14038 || icode == CODE_FOR_vsx_xxpermdi_v2di
14039 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14040 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14041 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14042 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14043 || icode == CODE_FOR_vsx_xxpermdi_v4si
14044 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14045 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14046 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14047 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14048 || icode == CODE_FOR_vsx_xxsldwi_v4si
14049 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14050 || icode == CODE_FOR_vsx_xxsldwi_v2di
14051 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14053 /* Only allow 2-bit unsigned literals. */
14054 STRIP_NOPS (arg2);
14055 if (TREE_CODE (arg2) != INTEGER_CST
14056 || TREE_INT_CST_LOW (arg2) & ~0x3)
14058 error ("argument 3 must be a 2-bit unsigned literal");
14059 return CONST0_RTX (tmode);
14062 else if (icode == CODE_FOR_vsx_set_v2df
14063 || icode == CODE_FOR_vsx_set_v2di
14064 || icode == CODE_FOR_bcdadd
14065 || icode == CODE_FOR_bcdadd_lt
14066 || icode == CODE_FOR_bcdadd_eq
14067 || icode == CODE_FOR_bcdadd_gt
14068 || icode == CODE_FOR_bcdsub
14069 || icode == CODE_FOR_bcdsub_lt
14070 || icode == CODE_FOR_bcdsub_eq
14071 || icode == CODE_FOR_bcdsub_gt)
14073 /* Only allow 1-bit unsigned literals. */
14074 STRIP_NOPS (arg2);
14075 if (TREE_CODE (arg2) != INTEGER_CST
14076 || TREE_INT_CST_LOW (arg2) & ~0x1)
14078 error ("argument 3 must be a 1-bit unsigned literal");
14079 return CONST0_RTX (tmode);
14082 else if (icode == CODE_FOR_dfp_ddedpd_dd
14083 || icode == CODE_FOR_dfp_ddedpd_td)
14085 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14086 STRIP_NOPS (arg0);
14087 if (TREE_CODE (arg0) != INTEGER_CST
14088 || TREE_INT_CST_LOW (arg2) & ~0x3)
14090 error ("argument 1 must be 0 or 2");
14091 return CONST0_RTX (tmode);
14094 else if (icode == CODE_FOR_dfp_denbcd_dd
14095 || icode == CODE_FOR_dfp_denbcd_td)
14097 /* Only allow 1-bit unsigned literals. */
14098 STRIP_NOPS (arg0);
14099 if (TREE_CODE (arg0) != INTEGER_CST
14100 || TREE_INT_CST_LOW (arg0) & ~0x1)
14102 error ("argument 1 must be a 1-bit unsigned literal");
14103 return CONST0_RTX (tmode);
14106 else if (icode == CODE_FOR_dfp_dscli_dd
14107 || icode == CODE_FOR_dfp_dscli_td
14108 || icode == CODE_FOR_dfp_dscri_dd
14109 || icode == CODE_FOR_dfp_dscri_td)
14111 /* Only allow 6-bit unsigned literals. */
14112 STRIP_NOPS (arg1);
14113 if (TREE_CODE (arg1) != INTEGER_CST
14114 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14116 error ("argument 2 must be a 6-bit unsigned literal");
14117 return CONST0_RTX (tmode);
14120 else if (icode == CODE_FOR_crypto_vshasigmaw
14121 || icode == CODE_FOR_crypto_vshasigmad)
14123 /* Check whether the 2nd and 3rd arguments are integer constants and in
14124 range and prepare arguments. */
14125 STRIP_NOPS (arg1);
14126 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14128 error ("argument 2 must be 0 or 1");
14129 return CONST0_RTX (tmode);
14132 STRIP_NOPS (arg2);
14133 if (TREE_CODE (arg2) != INTEGER_CST
14134 || wi::geu_p (wi::to_wide (arg2), 16))
14136 error ("argument 3 must be in the range [0, 15]");
14137 return CONST0_RTX (tmode);
14141 if (target == 0
14142 || GET_MODE (target) != tmode
14143 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14144 target = gen_reg_rtx (tmode);
14146 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14147 op0 = copy_to_mode_reg (mode0, op0);
14148 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14149 op1 = copy_to_mode_reg (mode1, op1);
14150 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14151 op2 = copy_to_mode_reg (mode2, op2);
14153 pat = GEN_FCN (icode) (target, op0, op1, op2);
14154 if (! pat)
14155 return 0;
14156 emit_insn (pat);
14158 return target;
14162 /* Expand the dst builtins. */
14163 static rtx
14164 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14165 bool *expandedp)
14167 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14168 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14169 tree arg0, arg1, arg2;
14170 machine_mode mode0, mode1;
14171 rtx pat, op0, op1, op2;
14172 const struct builtin_description *d;
14173 size_t i;
14175 *expandedp = false;
14177 /* Handle DST variants. */
14178 d = bdesc_dst;
14179 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14180 if (d->code == fcode)
14182 arg0 = CALL_EXPR_ARG (exp, 0);
14183 arg1 = CALL_EXPR_ARG (exp, 1);
14184 arg2 = CALL_EXPR_ARG (exp, 2);
14185 op0 = expand_normal (arg0);
14186 op1 = expand_normal (arg1);
14187 op2 = expand_normal (arg2);
14188 mode0 = insn_data[d->icode].operand[0].mode;
14189 mode1 = insn_data[d->icode].operand[1].mode;
14191 /* Invalid arguments, bail out before generating bad rtl. */
14192 if (arg0 == error_mark_node
14193 || arg1 == error_mark_node
14194 || arg2 == error_mark_node)
14195 return const0_rtx;
14197 *expandedp = true;
14198 STRIP_NOPS (arg2);
14199 if (TREE_CODE (arg2) != INTEGER_CST
14200 || TREE_INT_CST_LOW (arg2) & ~0x3)
14202 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14203 return const0_rtx;
14206 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14207 op0 = copy_to_mode_reg (Pmode, op0);
14208 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14209 op1 = copy_to_mode_reg (mode1, op1);
14211 pat = GEN_FCN (d->icode) (op0, op1, op2);
14212 if (pat != 0)
14213 emit_insn (pat);
14215 return NULL_RTX;
14218 return NULL_RTX;
14221 /* Expand vec_init builtin. */
14222 static rtx
14223 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14225 machine_mode tmode = TYPE_MODE (type);
14226 machine_mode inner_mode = GET_MODE_INNER (tmode);
14227 int i, n_elt = GET_MODE_NUNITS (tmode);
14229 gcc_assert (VECTOR_MODE_P (tmode));
14230 gcc_assert (n_elt == call_expr_nargs (exp));
14232 if (!target || !register_operand (target, tmode))
14233 target = gen_reg_rtx (tmode);
14235 /* If we have a vector comprised of a single element, such as V1TImode, do
14236 the initialization directly.  */
14237 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14239 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14240 emit_move_insn (target, gen_lowpart (tmode, x));
14242 else
14244 rtvec v = rtvec_alloc (n_elt);
14246 for (i = 0; i < n_elt; ++i)
14248 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14249 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14252 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14255 return target;
14258 /* Return the integer constant in ARG. Constrain it to be in the range
14259 of the subparts of VEC_TYPE; issue an error if not. */
14261 static int
14262 get_element_number (tree vec_type, tree arg)
14264 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14266 if (!tree_fits_uhwi_p (arg)
14267 || (elt = tree_to_uhwi (arg), elt > max))
14269 error ("selector must be an integer constant in the range [0, %wi]", max);
14270 return 0;
14273 return elt;
14276 /* Expand vec_set builtin. */
14277 static rtx
14278 altivec_expand_vec_set_builtin (tree exp)
14280 machine_mode tmode, mode1;
14281 tree arg0, arg1, arg2;
14282 int elt;
14283 rtx op0, op1;
14285 arg0 = CALL_EXPR_ARG (exp, 0);
14286 arg1 = CALL_EXPR_ARG (exp, 1);
14287 arg2 = CALL_EXPR_ARG (exp, 2);
14289 tmode = TYPE_MODE (TREE_TYPE (arg0));
14290 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14291 gcc_assert (VECTOR_MODE_P (tmode));
14293 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14294 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14295 elt = get_element_number (TREE_TYPE (arg0), arg2);
14297 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14298 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14300 op0 = force_reg (tmode, op0);
14301 op1 = force_reg (mode1, op1);
14303 rs6000_expand_vector_set (op0, op1, elt);
14305 return op0;
14308 /* Expand vec_ext builtin. */
14309 static rtx
14310 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14312 machine_mode tmode, mode0;
14313 tree arg0, arg1;
14314 rtx op0;
14315 rtx op1;
14317 arg0 = CALL_EXPR_ARG (exp, 0);
14318 arg1 = CALL_EXPR_ARG (exp, 1);
14320 op0 = expand_normal (arg0);
14321 op1 = expand_normal (arg1);
14323 if (TREE_CODE (arg1) == INTEGER_CST)
14325 unsigned HOST_WIDE_INT elt;
14326 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14327 unsigned int truncated_selector;
14328 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
14329 returns the low-order bits of the INTEGER_CST for modulo indexing.  */
14330 elt = TREE_INT_CST_LOW (arg1);
14331 truncated_selector = elt % size;
14332 op1 = GEN_INT (truncated_selector);
14335 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14336 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14337 gcc_assert (VECTOR_MODE_P (mode0));
14339 op0 = force_reg (mode0, op0);
14341 if (optimize || !target || !register_operand (target, tmode))
14342 target = gen_reg_rtx (tmode);
14344 rs6000_expand_vector_extract (target, op0, op1);
14346 return target;
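/* For example (per the modulo rule above): on a 4-element vector V,
   extracting element 5 uses selector 5 % 4 == 1, so vec_extract (V, 5)
   yields the same element as vec_extract (V, 1).  */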
14349 /* Expand the builtin in EXP and store the result in TARGET. Store
14350 true in *EXPANDEDP if we found a builtin to expand. */
14351 static rtx
14352 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14354 const struct builtin_description *d;
14355 size_t i;
14356 enum insn_code icode;
14357 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14358 tree arg0, arg1, arg2;
14359 rtx op0, pat;
14360 machine_mode tmode, mode0;
14361 enum rs6000_builtins fcode
14362 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14364 if (rs6000_overloaded_builtin_p (fcode))
14366 *expandedp = true;
14367 error ("unresolved overload for Altivec builtin %qF", fndecl);
14369 /* Given it is invalid, just generate a normal call. */
14370 return expand_call (exp, target, false);
14373 target = altivec_expand_dst_builtin (exp, target, expandedp);
14374 if (*expandedp)
14375 return target;
14377 *expandedp = true;
14379 switch (fcode)
14381 case ALTIVEC_BUILTIN_STVX_V2DF:
14382 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14383 case ALTIVEC_BUILTIN_STVX_V2DI:
14384 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14385 case ALTIVEC_BUILTIN_STVX_V4SF:
14386 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14387 case ALTIVEC_BUILTIN_STVX:
14388 case ALTIVEC_BUILTIN_STVX_V4SI:
14389 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14390 case ALTIVEC_BUILTIN_STVX_V8HI:
14391 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14392 case ALTIVEC_BUILTIN_STVX_V16QI:
14393 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14394 case ALTIVEC_BUILTIN_STVEBX:
14395 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14396 case ALTIVEC_BUILTIN_STVEHX:
14397 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14398 case ALTIVEC_BUILTIN_STVEWX:
14399 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14400 case ALTIVEC_BUILTIN_STVXL_V2DF:
14401 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14402 case ALTIVEC_BUILTIN_STVXL_V2DI:
14403 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14404 case ALTIVEC_BUILTIN_STVXL_V4SF:
14405 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14406 case ALTIVEC_BUILTIN_STVXL:
14407 case ALTIVEC_BUILTIN_STVXL_V4SI:
14408 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14409 case ALTIVEC_BUILTIN_STVXL_V8HI:
14410 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14411 case ALTIVEC_BUILTIN_STVXL_V16QI:
14412 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14414 case ALTIVEC_BUILTIN_STVLX:
14415 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14416 case ALTIVEC_BUILTIN_STVLXL:
14417 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14418 case ALTIVEC_BUILTIN_STVRX:
14419 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14420 case ALTIVEC_BUILTIN_STVRXL:
14421 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14423 case P9V_BUILTIN_STXVL:
14424 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14426 case P9V_BUILTIN_XST_LEN_R:
14427 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14429 case VSX_BUILTIN_STXVD2X_V1TI:
14430 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14431 case VSX_BUILTIN_STXVD2X_V2DF:
14432 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14433 case VSX_BUILTIN_STXVD2X_V2DI:
14434 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14435 case VSX_BUILTIN_STXVW4X_V4SF:
14436 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14437 case VSX_BUILTIN_STXVW4X_V4SI:
14438 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14439 case VSX_BUILTIN_STXVW4X_V8HI:
14440 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14441 case VSX_BUILTIN_STXVW4X_V16QI:
14442 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14444 /* For the following on big endian, it's ok to use any appropriate
14445 unaligned-supporting store, so use a generic expander. For
14446 little-endian, the exact element-reversing instruction must
14447 be used. */
14448 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14450 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14451 : CODE_FOR_vsx_st_elemrev_v1ti);
14452 return altivec_expand_stv_builtin (code, exp);
14454 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14456 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14457 : CODE_FOR_vsx_st_elemrev_v2df);
14458 return altivec_expand_stv_builtin (code, exp);
14460 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14462 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14463 : CODE_FOR_vsx_st_elemrev_v2di);
14464 return altivec_expand_stv_builtin (code, exp);
14466 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14468 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14469 : CODE_FOR_vsx_st_elemrev_v4sf);
14470 return altivec_expand_stv_builtin (code, exp);
14472 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14474 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14475 : CODE_FOR_vsx_st_elemrev_v4si);
14476 return altivec_expand_stv_builtin (code, exp);
14478 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14480 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14481 : CODE_FOR_vsx_st_elemrev_v8hi);
14482 return altivec_expand_stv_builtin (code, exp);
14484 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14486 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14487 : CODE_FOR_vsx_st_elemrev_v16qi);
14488 return altivec_expand_stv_builtin (code, exp);
14491 case ALTIVEC_BUILTIN_MFVSCR:
14492 icode = CODE_FOR_altivec_mfvscr;
14493 tmode = insn_data[icode].operand[0].mode;
14495 if (target == 0
14496 || GET_MODE (target) != tmode
14497 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14498 target = gen_reg_rtx (tmode);
14500 pat = GEN_FCN (icode) (target);
14501 if (! pat)
14502 return 0;
14503 emit_insn (pat);
14504 return target;
14506 case ALTIVEC_BUILTIN_MTVSCR:
14507 icode = CODE_FOR_altivec_mtvscr;
14508 arg0 = CALL_EXPR_ARG (exp, 0);
14509 op0 = expand_normal (arg0);
14510 mode0 = insn_data[icode].operand[0].mode;
14512 /* If we got invalid arguments bail out before generating bad rtl. */
14513 if (arg0 == error_mark_node)
14514 return const0_rtx;
14516 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14517 op0 = copy_to_mode_reg (mode0, op0);
14519 pat = GEN_FCN (icode) (op0);
14520 if (pat)
14521 emit_insn (pat);
14522 return NULL_RTX;
14524 case ALTIVEC_BUILTIN_DSSALL:
14525 emit_insn (gen_altivec_dssall ());
14526 return NULL_RTX;
14528 case ALTIVEC_BUILTIN_DSS:
14529 icode = CODE_FOR_altivec_dss;
14530 arg0 = CALL_EXPR_ARG (exp, 0);
14531 STRIP_NOPS (arg0);
14532 op0 = expand_normal (arg0);
14533 mode0 = insn_data[icode].operand[0].mode;
14535 /* If we got invalid arguments bail out before generating bad rtl. */
14536 if (arg0 == error_mark_node)
14537 return const0_rtx;
14539 if (TREE_CODE (arg0) != INTEGER_CST
14540 || TREE_INT_CST_LOW (arg0) & ~0x3)
14542 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14543 return const0_rtx;
14546 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14547 op0 = copy_to_mode_reg (mode0, op0);
14549 emit_insn (gen_altivec_dss (op0));
14550 return NULL_RTX;
14552 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14553 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14554 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14555 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14556 case VSX_BUILTIN_VEC_INIT_V2DF:
14557 case VSX_BUILTIN_VEC_INIT_V2DI:
14558 case VSX_BUILTIN_VEC_INIT_V1TI:
14559 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14561 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14562 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14563 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14564 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14565 case VSX_BUILTIN_VEC_SET_V2DF:
14566 case VSX_BUILTIN_VEC_SET_V2DI:
14567 case VSX_BUILTIN_VEC_SET_V1TI:
14568 return altivec_expand_vec_set_builtin (exp);
14570 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14571 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14572 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14573 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14574 case VSX_BUILTIN_VEC_EXT_V2DF:
14575 case VSX_BUILTIN_VEC_EXT_V2DI:
14576 case VSX_BUILTIN_VEC_EXT_V1TI:
14577 return altivec_expand_vec_ext_builtin (exp, target);
14579 case P9V_BUILTIN_VEC_EXTRACT4B:
14580 arg1 = CALL_EXPR_ARG (exp, 1);
14581 STRIP_NOPS (arg1);
14583 /* Generate a normal call if it is invalid. */
14584 if (arg1 == error_mark_node)
14585 return expand_call (exp, target, false);
      if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
	{
	  error ("second argument to %qs must be [0, 12]", "vec_vextract4b");
	  return expand_call (exp, target, false);
	}
      break;
14594 case P9V_BUILTIN_VEC_INSERT4B:
14595 arg2 = CALL_EXPR_ARG (exp, 2);
14596 STRIP_NOPS (arg2);
14598 /* Generate a normal call if it is invalid. */
14599 if (arg2 == error_mark_node)
14600 return expand_call (exp, target, false);
      if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
	{
	  error ("third argument to %qs must be [0, 12]", "vec_vinsert4b");
	  return expand_call (exp, target, false);
	}
      break;
    default:
      break;
    }
14614 /* Expand abs* operations. */
14615 d = bdesc_abs;
14616 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14617 if (d->code == fcode)
14618 return altivec_expand_abs_builtin (d->icode, exp, target);
14620 /* Expand the AltiVec predicates. */
14621 d = bdesc_altivec_preds;
14622 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14623 if (d->code == fcode)
14624 return altivec_expand_predicate_builtin (d->icode, exp, target);
14626 /* LV* are funky. We initialized them differently. */
14627 switch (fcode)
14629 case ALTIVEC_BUILTIN_LVSL:
14630 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14631 exp, target, false);
14632 case ALTIVEC_BUILTIN_LVSR:
14633 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14634 exp, target, false);
14635 case ALTIVEC_BUILTIN_LVEBX:
14636 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14637 exp, target, false);
14638 case ALTIVEC_BUILTIN_LVEHX:
14639 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14640 exp, target, false);
14641 case ALTIVEC_BUILTIN_LVEWX:
14642 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14643 exp, target, false);
14644 case ALTIVEC_BUILTIN_LVXL_V2DF:
14645 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14646 exp, target, false);
14647 case ALTIVEC_BUILTIN_LVXL_V2DI:
14648 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14649 exp, target, false);
14650 case ALTIVEC_BUILTIN_LVXL_V4SF:
14651 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14652 exp, target, false);
14653 case ALTIVEC_BUILTIN_LVXL:
14654 case ALTIVEC_BUILTIN_LVXL_V4SI:
14655 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14656 exp, target, false);
14657 case ALTIVEC_BUILTIN_LVXL_V8HI:
14658 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14659 exp, target, false);
14660 case ALTIVEC_BUILTIN_LVXL_V16QI:
14661 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14662 exp, target, false);
14663 case ALTIVEC_BUILTIN_LVX_V1TI:
14664 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14665 exp, target, false);
14666 case ALTIVEC_BUILTIN_LVX_V2DF:
14667 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14668 exp, target, false);
14669 case ALTIVEC_BUILTIN_LVX_V2DI:
14670 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14671 exp, target, false);
14672 case ALTIVEC_BUILTIN_LVX_V4SF:
14673 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14674 exp, target, false);
14675 case ALTIVEC_BUILTIN_LVX:
14676 case ALTIVEC_BUILTIN_LVX_V4SI:
14677 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14678 exp, target, false);
14679 case ALTIVEC_BUILTIN_LVX_V8HI:
14680 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14681 exp, target, false);
14682 case ALTIVEC_BUILTIN_LVX_V16QI:
14683 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14684 exp, target, false);
14685 case ALTIVEC_BUILTIN_LVLX:
14686 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14687 exp, target, true);
14688 case ALTIVEC_BUILTIN_LVLXL:
14689 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14690 exp, target, true);
14691 case ALTIVEC_BUILTIN_LVRX:
14692 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14693 exp, target, true);
14694 case ALTIVEC_BUILTIN_LVRXL:
14695 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14696 exp, target, true);
14697 case VSX_BUILTIN_LXVD2X_V1TI:
14698 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14699 exp, target, false);
14700 case VSX_BUILTIN_LXVD2X_V2DF:
14701 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14702 exp, target, false);
14703 case VSX_BUILTIN_LXVD2X_V2DI:
14704 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14705 exp, target, false);
14706 case VSX_BUILTIN_LXVW4X_V4SF:
14707 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14708 exp, target, false);
14709 case VSX_BUILTIN_LXVW4X_V4SI:
14710 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14711 exp, target, false);
14712 case VSX_BUILTIN_LXVW4X_V8HI:
14713 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14714 exp, target, false);
14715 case VSX_BUILTIN_LXVW4X_V16QI:
14716 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14717 exp, target, false);
    /* For the following, on big-endian targets it's OK to use any
       appropriate unaligned-supporting load, so use a generic expander.
       For little-endian targets, the exact element-reversing instruction
       must be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
			       : CODE_FOR_vsx_ld_elemrev_v2df);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V1TI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
			       : CODE_FOR_vsx_ld_elemrev_v1ti);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
			       : CODE_FOR_vsx_ld_elemrev_v2di);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
			       : CODE_FOR_vsx_ld_elemrev_v4sf);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
			       : CODE_FOR_vsx_ld_elemrev_v4si);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
			       : CODE_FOR_vsx_ld_elemrev_v8hi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
			       : CODE_FOR_vsx_ld_elemrev_v16qi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    default:
      break;
    }
14770 *expandedp = false;
14771 return NULL_RTX;
/* Check whether a builtin function is supported in this target
   configuration.  */
bool
rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
{
  HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
  return (fnmask & rs6000_builtin_mask) == fnmask;
}
14786 /* Raise an error message for a builtin function that is called without the
14787 appropriate target options being set. */
14789 static void
14790 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14792 size_t uns_fncode = (size_t) fncode;
14793 const char *name = rs6000_builtin_info[uns_fncode].name;
14794 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14796 gcc_assert (name != NULL);
14797 if ((fnmask & RS6000_BTM_CELL) != 0)
14798 error ("%qs is only valid for the cell processor", name);
14799 else if ((fnmask & RS6000_BTM_VSX) != 0)
14800 error ("%qs requires the %qs option", name, "-mvsx");
14801 else if ((fnmask & RS6000_BTM_HTM) != 0)
14802 error ("%qs requires the %qs option", name, "-mhtm");
14803 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14804 error ("%qs requires the %qs option", name, "-maltivec");
14805 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14806 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14807 error ("%qs requires the %qs and %qs options", name, "-mhard-dfp",
14808 "-mpower8-vector");
14809 else if ((fnmask & RS6000_BTM_DFP) != 0)
14810 error ("%qs requires the %qs option", name, "-mhard-dfp");
14811 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14812 error ("%qs requires the %qs option", name, "-mpower8-vector");
14813 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14814 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14815 error ("%qs requires the %qs and %qs options", name, "-mcpu=power9",
14816 "-m64");
14817 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14818 error ("%qs requires the %qs option", name, "-mcpu=power9");
14819 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14820 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14821 error ("%qs requires the %qs and %qs options", name, "-mcpu=power9",
14822 "-m64");
14823 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14824 error ("%qs requires the %qs option", name, "-mcpu=power9");
  else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
    {
      if (!TARGET_HARD_FLOAT)
	error ("%qs requires the %qs option", name, "-mhard-float");
      else
	error ("%qs requires the %qs option", name,
	       TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
    }
14833 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14834 error ("%qs requires the %qs option", name, "-mhard-float");
14835 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14836 error ("%qs requires ISA 3.0 IEEE 128-bit floating point", name);
14837 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("%qs requires the %qs option", name, "-mfloat128");
14839 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14840 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14841 error ("%qs requires the %qs (or newer), and %qs or %qs options",
14842 name, "-mcpu=power7", "-m64", "-mpowerpc64");
14843 else
14844 error ("%qs is not supported with the current options", name);
14847 /* Target hook for early folding of built-ins, shamelessly stolen
14848 from ia64.c. */
14850 static tree
14851 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14852 int n_args ATTRIBUTE_UNUSED,
14853 tree *args ATTRIBUTE_UNUSED,
14854 bool ignore ATTRIBUTE_UNUSED)
14856 #ifdef SUBTARGET_FOLD_BUILTIN
14857 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14858 #else
14859 return NULL_TREE;
14860 #endif
14863 /* Helper function to sort out which built-ins may be valid without having
14864 a LHS. */
14865 static bool
14866 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14868 switch (fn_code)
14870 case ALTIVEC_BUILTIN_STVX_V16QI:
14871 case ALTIVEC_BUILTIN_STVX_V8HI:
14872 case ALTIVEC_BUILTIN_STVX_V4SI:
14873 case ALTIVEC_BUILTIN_STVX_V4SF:
14874 case ALTIVEC_BUILTIN_STVX_V2DI:
14875 case ALTIVEC_BUILTIN_STVX_V2DF:
14876 case VSX_BUILTIN_STXVW4X_V16QI:
14877 case VSX_BUILTIN_STXVW4X_V8HI:
14878 case VSX_BUILTIN_STXVW4X_V4SF:
14879 case VSX_BUILTIN_STXVW4X_V4SI:
14880 case VSX_BUILTIN_STXVD2X_V2DF:
14881 case VSX_BUILTIN_STXVD2X_V2DI:
14882 return true;
14883 default:
14884 return false;
14888 /* Helper function to handle the gimple folding of a vector compare
14889 operation. This sets up true/false vectors, and uses the
14890 VEC_COND_EXPR operation.
   CODE indicates which comparison is to be made (EQ, GT, ...).
14892 TYPE indicates the type of the result. */
14893 static tree
14894 fold_build_vec_cmp (tree_code code, tree type,
14895 tree arg0, tree arg1)
14897 tree cmp_type = build_same_sized_truth_vector_type (type);
14898 tree zero_vec = build_zero_cst (type);
14899 tree minus_one_vec = build_minus_one_cst (type);
14900 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
14901 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
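
/* E.g., for two V4SI operands, fold_build_vec_cmp (EQ_EXPR, ...) produces
   roughly:
     cmp = arg0 == arg1;
     result = VEC_COND_EXPR <cmp, {-1,-1,-1,-1}, {0,0,0,0}>;
   matching the all-ones/all-zeros lane convention of the hardware
   compare instructions.  */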
14904 /* Helper function to handle the in-between steps for the
14905 vector compare built-ins. */
14906 static void
14907 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
14909 tree arg0 = gimple_call_arg (stmt, 0);
14910 tree arg1 = gimple_call_arg (stmt, 1);
14911 tree lhs = gimple_call_lhs (stmt);
14912 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
14913 gimple *g = gimple_build_assign (lhs, cmp);
14914 gimple_set_location (g, gimple_location (stmt));
14915 gsi_replace (gsi, g, true);
14918 /* Helper function to map V2DF and V4SF types to their
14919 integral equivalents (V2DI and V4SI). */
14920 tree map_to_integral_tree_type (tree input_tree_type)
14922 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
14923 return input_tree_type;
14924 else
14926 if (types_compatible_p (TREE_TYPE (input_tree_type),
14927 TREE_TYPE (V2DF_type_node)))
14928 return V2DI_type_node;
14929 else if (types_compatible_p (TREE_TYPE (input_tree_type),
14930 TREE_TYPE (V4SF_type_node)))
14931 return V4SI_type_node;
14932 else
14933 gcc_unreachable ();
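
/* E.g., map_to_integral_tree_type maps V2DF to V2DI and V4SF to V4SI;
   an integral vector type such as V8HI is returned unchanged.  */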
/* Helper function to handle the vector merge[hl] built-ins.  The
   difference between the h and l versions is in the values used when
   building the permute vector, selecting the high-word or the low-word
   merge.  The variance is keyed off the use_high parameter.  */
14941 static void
14942 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
14944 tree arg0 = gimple_call_arg (stmt, 0);
14945 tree arg1 = gimple_call_arg (stmt, 1);
14946 tree lhs = gimple_call_lhs (stmt);
14947 tree lhs_type = TREE_TYPE (lhs);
14948 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
14949 int midpoint = n_elts / 2;
14950 int offset = 0;
14952 if (use_high == 1)
14953 offset = midpoint;
14955 /* The permute_type will match the lhs for integral types. For double and
14956 float types, the permute type needs to map to the V2 or V4 type that
14957 matches size. */
14958 tree permute_type;
14959 permute_type = map_to_integral_tree_type (lhs_type);
14960 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
  for (int i = 0; i < midpoint; i++)
    {
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     offset + i));
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     offset + n_elts + i));
    }
14970 tree permute = elts.build ();
14972 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
14973 gimple_set_location (g, gimple_location (stmt));
14974 gsi_replace (gsi, g, true);
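
/* E.g., for V4SI operands, use_high == 0 (used for vec_mergeh) builds the
   permute vector {0, 4, 1, 5}, while use_high == 1 (vec_mergel) builds
   {2, 6, 3, 7}, selecting lanes alternately from arg0 and arg1.  */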
14977 /* Helper function to handle the vector merge[eo] built-ins. */
14978 static void
14979 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
14981 tree arg0 = gimple_call_arg (stmt, 0);
14982 tree arg1 = gimple_call_arg (stmt, 1);
14983 tree lhs = gimple_call_lhs (stmt);
14984 tree lhs_type = TREE_TYPE (lhs);
14985 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
14987 /* The permute_type will match the lhs for integral types. For double and
14988 float types, the permute type needs to map to the V2 or V4 type that
14989 matches size. */
14990 tree permute_type;
14991 permute_type = map_to_integral_tree_type (lhs_type);
14993 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
14995 /* Build the permute vector. */
  for (int i = 0; i < n_elts / 2; i++)
    {
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     2 * i + use_odd));
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     2 * i + use_odd + n_elts));
    }
15004 tree permute = elts.build ();
15006 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15007 gimple_set_location (g, gimple_location (stmt));
15008 gsi_replace (gsi, g, true);
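
/* E.g., for V4SI operands, use_odd == 0 (vec_mergee) builds the permute
   vector {0, 4, 2, 6} and use_odd == 1 (vec_mergeo) builds {1, 5, 3, 7}.  */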
15011 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15012 a constant, use rs6000_fold_builtin.) */
15014 bool
15015 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15017 gimple *stmt = gsi_stmt (*gsi);
15018 tree fndecl = gimple_call_fndecl (stmt);
15019 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15020 enum rs6000_builtins fn_code
15021 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15022 tree arg0, arg1, lhs, temp;
15023 enum tree_code bcode;
15024 gimple *g;
15026 size_t uns_fncode = (size_t) fn_code;
15027 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15028 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15029 const char *fn_name2 = (icode != CODE_FOR_nothing)
15030 ? get_insn_name ((int) icode)
15031 : "nothing";
15033 if (TARGET_DEBUG_BUILTIN)
15034 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15035 fn_code, fn_name1, fn_name2);
15037 if (!rs6000_fold_gimple)
15038 return false;
15040 /* Prevent gimple folding for code that does not have a LHS, unless it is
15041 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15042 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15043 return false;
15045 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15046 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15047 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15048 if (!func_valid_p)
15049 return false;
15051 switch (fn_code)
15053 /* Flavors of vec_add. We deliberately don't expand
15054 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15055 TImode, resulting in much poorer code generation. */
15056 case ALTIVEC_BUILTIN_VADDUBM:
15057 case ALTIVEC_BUILTIN_VADDUHM:
15058 case ALTIVEC_BUILTIN_VADDUWM:
15059 case P8V_BUILTIN_VADDUDM:
15060 case ALTIVEC_BUILTIN_VADDFP:
15061 case VSX_BUILTIN_XVADDDP:
15062 bcode = PLUS_EXPR;
15063 do_binary:
15064 arg0 = gimple_call_arg (stmt, 0);
15065 arg1 = gimple_call_arg (stmt, 1);
15066 lhs = gimple_call_lhs (stmt);
      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
	  && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
	{
	  /* Ensure the binary operation is performed in a type
	     that wraps if it is an integral type.  */
	  gimple_seq stmts = NULL;
	  tree type = unsigned_type_for (TREE_TYPE (lhs));
	  tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
				     type, arg0);
	  tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
				     type, arg1);
	  tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
				   type, uarg0, uarg1);
	  gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	  g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
				   build1 (VIEW_CONVERT_EXPR,
					   TREE_TYPE (lhs), res));
	  gsi_replace (gsi, g, true);
	  return true;
	}
15087 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15088 gimple_set_location (g, gimple_location (stmt));
15089 gsi_replace (gsi, g, true);
15090 return true;
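
      /* E.g., a vec_add of two signed V4SI values is rewritten here as
	 (roughly):
	   u0 = VIEW_CONVERT_EXPR <V4SI unsigned> (arg0);
	   u1 = VIEW_CONVERT_EXPR <V4SI unsigned> (arg1);
	   t  = u0 + u1;
	   lhs = VIEW_CONVERT_EXPR <V4SI> (t);
	 so the addition wraps rather than invoking signed-overflow
	 undefined behavior.  */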
15091 /* Flavors of vec_sub. We deliberately don't expand
15092 P8V_BUILTIN_VSUBUQM. */
15093 case ALTIVEC_BUILTIN_VSUBUBM:
15094 case ALTIVEC_BUILTIN_VSUBUHM:
15095 case ALTIVEC_BUILTIN_VSUBUWM:
15096 case P8V_BUILTIN_VSUBUDM:
15097 case ALTIVEC_BUILTIN_VSUBFP:
15098 case VSX_BUILTIN_XVSUBDP:
15099 bcode = MINUS_EXPR;
15100 goto do_binary;
15101 case VSX_BUILTIN_XVMULSP:
15102 case VSX_BUILTIN_XVMULDP:
15103 arg0 = gimple_call_arg (stmt, 0);
15104 arg1 = gimple_call_arg (stmt, 1);
15105 lhs = gimple_call_lhs (stmt);
15106 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15107 gimple_set_location (g, gimple_location (stmt));
15108 gsi_replace (gsi, g, true);
15109 return true;
15110 /* Even element flavors of vec_mul (signed). */
15111 case ALTIVEC_BUILTIN_VMULESB:
15112 case ALTIVEC_BUILTIN_VMULESH:
15113 case P8V_BUILTIN_VMULESW:
15114 /* Even element flavors of vec_mul (unsigned). */
15115 case ALTIVEC_BUILTIN_VMULEUB:
15116 case ALTIVEC_BUILTIN_VMULEUH:
15117 case P8V_BUILTIN_VMULEUW:
15118 arg0 = gimple_call_arg (stmt, 0);
15119 arg1 = gimple_call_arg (stmt, 1);
15120 lhs = gimple_call_lhs (stmt);
15121 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15122 gimple_set_location (g, gimple_location (stmt));
15123 gsi_replace (gsi, g, true);
15124 return true;
15125 /* Odd element flavors of vec_mul (signed). */
15126 case ALTIVEC_BUILTIN_VMULOSB:
15127 case ALTIVEC_BUILTIN_VMULOSH:
15128 case P8V_BUILTIN_VMULOSW:
15129 /* Odd element flavors of vec_mul (unsigned). */
15130 case ALTIVEC_BUILTIN_VMULOUB:
15131 case ALTIVEC_BUILTIN_VMULOUH:
15132 case P8V_BUILTIN_VMULOUW:
15133 arg0 = gimple_call_arg (stmt, 0);
15134 arg1 = gimple_call_arg (stmt, 1);
15135 lhs = gimple_call_lhs (stmt);
15136 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15137 gimple_set_location (g, gimple_location (stmt));
15138 gsi_replace (gsi, g, true);
15139 return true;
15140 /* Flavors of vec_div (Integer). */
15141 case VSX_BUILTIN_DIV_V2DI:
15142 case VSX_BUILTIN_UDIV_V2DI:
15143 arg0 = gimple_call_arg (stmt, 0);
15144 arg1 = gimple_call_arg (stmt, 1);
15145 lhs = gimple_call_lhs (stmt);
15146 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15147 gimple_set_location (g, gimple_location (stmt));
15148 gsi_replace (gsi, g, true);
15149 return true;
15150 /* Flavors of vec_div (Float). */
15151 case VSX_BUILTIN_XVDIVSP:
15152 case VSX_BUILTIN_XVDIVDP:
15153 arg0 = gimple_call_arg (stmt, 0);
15154 arg1 = gimple_call_arg (stmt, 1);
15155 lhs = gimple_call_lhs (stmt);
15156 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15157 gimple_set_location (g, gimple_location (stmt));
15158 gsi_replace (gsi, g, true);
15159 return true;
15160 /* Flavors of vec_and. */
15161 case ALTIVEC_BUILTIN_VAND:
15162 arg0 = gimple_call_arg (stmt, 0);
15163 arg1 = gimple_call_arg (stmt, 1);
15164 lhs = gimple_call_lhs (stmt);
15165 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15166 gimple_set_location (g, gimple_location (stmt));
15167 gsi_replace (gsi, g, true);
15168 return true;
15169 /* Flavors of vec_andc. */
15170 case ALTIVEC_BUILTIN_VANDC:
15171 arg0 = gimple_call_arg (stmt, 0);
15172 arg1 = gimple_call_arg (stmt, 1);
15173 lhs = gimple_call_lhs (stmt);
15174 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15175 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15176 gimple_set_location (g, gimple_location (stmt));
15177 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15178 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15179 gimple_set_location (g, gimple_location (stmt));
15180 gsi_replace (gsi, g, true);
15181 return true;
15182 /* Flavors of vec_nand. */
15183 case P8V_BUILTIN_VEC_NAND:
15184 case P8V_BUILTIN_NAND_V16QI:
15185 case P8V_BUILTIN_NAND_V8HI:
15186 case P8V_BUILTIN_NAND_V4SI:
15187 case P8V_BUILTIN_NAND_V4SF:
15188 case P8V_BUILTIN_NAND_V2DF:
15189 case P8V_BUILTIN_NAND_V2DI:
15190 arg0 = gimple_call_arg (stmt, 0);
15191 arg1 = gimple_call_arg (stmt, 1);
15192 lhs = gimple_call_lhs (stmt);
15193 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15194 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15195 gimple_set_location (g, gimple_location (stmt));
15196 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15197 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15198 gimple_set_location (g, gimple_location (stmt));
15199 gsi_replace (gsi, g, true);
15200 return true;
15201 /* Flavors of vec_or. */
15202 case ALTIVEC_BUILTIN_VOR:
15203 arg0 = gimple_call_arg (stmt, 0);
15204 arg1 = gimple_call_arg (stmt, 1);
15205 lhs = gimple_call_lhs (stmt);
15206 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15207 gimple_set_location (g, gimple_location (stmt));
15208 gsi_replace (gsi, g, true);
15209 return true;
    /* Flavors of vec_orc.  */
15211 case P8V_BUILTIN_ORC_V16QI:
15212 case P8V_BUILTIN_ORC_V8HI:
15213 case P8V_BUILTIN_ORC_V4SI:
15214 case P8V_BUILTIN_ORC_V4SF:
15215 case P8V_BUILTIN_ORC_V2DF:
15216 case P8V_BUILTIN_ORC_V2DI:
15217 arg0 = gimple_call_arg (stmt, 0);
15218 arg1 = gimple_call_arg (stmt, 1);
15219 lhs = gimple_call_lhs (stmt);
15220 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15221 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15222 gimple_set_location (g, gimple_location (stmt));
15223 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15224 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15225 gimple_set_location (g, gimple_location (stmt));
15226 gsi_replace (gsi, g, true);
15227 return true;
15228 /* Flavors of vec_xor. */
15229 case ALTIVEC_BUILTIN_VXOR:
15230 arg0 = gimple_call_arg (stmt, 0);
15231 arg1 = gimple_call_arg (stmt, 1);
15232 lhs = gimple_call_lhs (stmt);
15233 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15234 gimple_set_location (g, gimple_location (stmt));
15235 gsi_replace (gsi, g, true);
15236 return true;
15237 /* Flavors of vec_nor. */
15238 case ALTIVEC_BUILTIN_VNOR:
15239 arg0 = gimple_call_arg (stmt, 0);
15240 arg1 = gimple_call_arg (stmt, 1);
15241 lhs = gimple_call_lhs (stmt);
15242 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15243 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15244 gimple_set_location (g, gimple_location (stmt));
15245 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15246 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15247 gimple_set_location (g, gimple_location (stmt));
15248 gsi_replace (gsi, g, true);
15249 return true;
    /* Flavors of vec_abs.  */
15251 case ALTIVEC_BUILTIN_ABS_V16QI:
15252 case ALTIVEC_BUILTIN_ABS_V8HI:
15253 case ALTIVEC_BUILTIN_ABS_V4SI:
15254 case ALTIVEC_BUILTIN_ABS_V4SF:
15255 case P8V_BUILTIN_ABS_V2DI:
15256 case VSX_BUILTIN_XVABSDP:
15257 arg0 = gimple_call_arg (stmt, 0);
15258 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15259 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15260 return false;
15261 lhs = gimple_call_lhs (stmt);
15262 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15263 gimple_set_location (g, gimple_location (stmt));
15264 gsi_replace (gsi, g, true);
15265 return true;
    /* Flavors of vec_min.  */
15267 case VSX_BUILTIN_XVMINDP:
15268 case P8V_BUILTIN_VMINSD:
15269 case P8V_BUILTIN_VMINUD:
15270 case ALTIVEC_BUILTIN_VMINSB:
15271 case ALTIVEC_BUILTIN_VMINSH:
15272 case ALTIVEC_BUILTIN_VMINSW:
15273 case ALTIVEC_BUILTIN_VMINUB:
15274 case ALTIVEC_BUILTIN_VMINUH:
15275 case ALTIVEC_BUILTIN_VMINUW:
15276 case ALTIVEC_BUILTIN_VMINFP:
15277 arg0 = gimple_call_arg (stmt, 0);
15278 arg1 = gimple_call_arg (stmt, 1);
15279 lhs = gimple_call_lhs (stmt);
15280 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15281 gimple_set_location (g, gimple_location (stmt));
15282 gsi_replace (gsi, g, true);
15283 return true;
    /* Flavors of vec_max.  */
15285 case VSX_BUILTIN_XVMAXDP:
15286 case P8V_BUILTIN_VMAXSD:
15287 case P8V_BUILTIN_VMAXUD:
15288 case ALTIVEC_BUILTIN_VMAXSB:
15289 case ALTIVEC_BUILTIN_VMAXSH:
15290 case ALTIVEC_BUILTIN_VMAXSW:
15291 case ALTIVEC_BUILTIN_VMAXUB:
15292 case ALTIVEC_BUILTIN_VMAXUH:
15293 case ALTIVEC_BUILTIN_VMAXUW:
15294 case ALTIVEC_BUILTIN_VMAXFP:
15295 arg0 = gimple_call_arg (stmt, 0);
15296 arg1 = gimple_call_arg (stmt, 1);
15297 lhs = gimple_call_lhs (stmt);
15298 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15299 gimple_set_location (g, gimple_location (stmt));
15300 gsi_replace (gsi, g, true);
15301 return true;
15302 /* Flavors of vec_eqv. */
15303 case P8V_BUILTIN_EQV_V16QI:
15304 case P8V_BUILTIN_EQV_V8HI:
15305 case P8V_BUILTIN_EQV_V4SI:
15306 case P8V_BUILTIN_EQV_V4SF:
15307 case P8V_BUILTIN_EQV_V2DF:
15308 case P8V_BUILTIN_EQV_V2DI:
15309 arg0 = gimple_call_arg (stmt, 0);
15310 arg1 = gimple_call_arg (stmt, 1);
15311 lhs = gimple_call_lhs (stmt);
15312 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15313 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15314 gimple_set_location (g, gimple_location (stmt));
15315 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15316 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15317 gimple_set_location (g, gimple_location (stmt));
15318 gsi_replace (gsi, g, true);
15319 return true;
15320 /* Flavors of vec_rotate_left. */
15321 case ALTIVEC_BUILTIN_VRLB:
15322 case ALTIVEC_BUILTIN_VRLH:
15323 case ALTIVEC_BUILTIN_VRLW:
15324 case P8V_BUILTIN_VRLD:
15325 arg0 = gimple_call_arg (stmt, 0);
15326 arg1 = gimple_call_arg (stmt, 1);
15327 lhs = gimple_call_lhs (stmt);
15328 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15329 gimple_set_location (g, gimple_location (stmt));
15330 gsi_replace (gsi, g, true);
15331 return true;
15332 /* Flavors of vector shift right algebraic.
15333 vec_sra{b,h,w} -> vsra{b,h,w}. */
15334 case ALTIVEC_BUILTIN_VSRAB:
15335 case ALTIVEC_BUILTIN_VSRAH:
15336 case ALTIVEC_BUILTIN_VSRAW:
15337 case P8V_BUILTIN_VSRAD:
15339 arg0 = gimple_call_arg (stmt, 0);
15340 arg1 = gimple_call_arg (stmt, 1);
15341 lhs = gimple_call_lhs (stmt);
15342 tree arg1_type = TREE_TYPE (arg1);
15343 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15344 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15345 location_t loc = gimple_location (stmt);
	/* Force arg1 into the valid range matching the arg0 type.  */
	/* Build a vector consisting of the max valid bit-size values.  */
15348 int n_elts = VECTOR_CST_NELTS (arg1);
15349 tree element_size = build_int_cst (unsigned_element_type,
15350 128 / n_elts);
15351 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15352 for (int i = 0; i < n_elts; i++)
15353 elts.safe_push (element_size);
15354 tree modulo_tree = elts.build ();
15355 /* Modulo the provided shift value against that vector. */
15356 gimple_seq stmts = NULL;
15357 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15358 unsigned_arg1_type, arg1);
15359 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15360 unsigned_arg1_type, unsigned_arg1,
15361 modulo_tree);
15362 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15363 /* And finally, do the shift. */
15364 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15365 gimple_set_location (g, loc);
15366 gsi_replace (gsi, g, true);
15367 return true;
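
      /* E.g., for V4SI operands the shift counts are reduced modulo 32
	 (128 / 4), so the result is element-wise
	   lhs = arg0 >> (arg1 % {32, 32, 32, 32});
	 matching the vsraw hardware semantics.  */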
15369 /* Flavors of vector shift left.
15370 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15371 case ALTIVEC_BUILTIN_VSLB:
15372 case ALTIVEC_BUILTIN_VSLH:
15373 case ALTIVEC_BUILTIN_VSLW:
15374 case P8V_BUILTIN_VSLD:
15376 location_t loc;
15377 gimple_seq stmts = NULL;
15378 arg0 = gimple_call_arg (stmt, 0);
15379 tree arg0_type = TREE_TYPE (arg0);
15380 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15381 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15382 return false;
15383 arg1 = gimple_call_arg (stmt, 1);
15384 tree arg1_type = TREE_TYPE (arg1);
15385 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15386 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15387 loc = gimple_location (stmt);
15388 lhs = gimple_call_lhs (stmt);
	/* Force arg1 into the valid range matching the arg0 type.  */
	/* Build a vector consisting of the max valid bit-size values.  */
15391 int n_elts = VECTOR_CST_NELTS (arg1);
15392 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15393 * BITS_PER_UNIT;
15394 tree element_size = build_int_cst (unsigned_element_type,
15395 tree_size_in_bits / n_elts);
15396 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15397 for (int i = 0; i < n_elts; i++)
15398 elts.safe_push (element_size);
15399 tree modulo_tree = elts.build ();
15400 /* Modulo the provided shift value against that vector. */
15401 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15402 unsigned_arg1_type, arg1);
15403 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15404 unsigned_arg1_type, unsigned_arg1,
15405 modulo_tree);
15406 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15407 /* And finally, do the shift. */
15408 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15409 gimple_set_location (g, gimple_location (stmt));
15410 gsi_replace (gsi, g, true);
15411 return true;
15413 /* Flavors of vector shift right. */
15414 case ALTIVEC_BUILTIN_VSRB:
15415 case ALTIVEC_BUILTIN_VSRH:
15416 case ALTIVEC_BUILTIN_VSRW:
15417 case P8V_BUILTIN_VSRD:
15419 arg0 = gimple_call_arg (stmt, 0);
15420 arg1 = gimple_call_arg (stmt, 1);
15421 lhs = gimple_call_lhs (stmt);
15422 tree arg1_type = TREE_TYPE (arg1);
15423 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15424 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15425 location_t loc = gimple_location (stmt);
15426 gimple_seq stmts = NULL;
15427 /* Convert arg0 to unsigned. */
15428 tree arg0_unsigned
15429 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15430 unsigned_type_for (TREE_TYPE (arg0)), arg0);
	/* Force arg1 into the valid range matching the arg0 type.  */
	/* Build a vector consisting of the max valid bit-size values.  */
15433 int n_elts = VECTOR_CST_NELTS (arg1);
15434 tree element_size = build_int_cst (unsigned_element_type,
15435 128 / n_elts);
15436 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15437 for (int i = 0; i < n_elts; i++)
15438 elts.safe_push (element_size);
15439 tree modulo_tree = elts.build ();
15440 /* Modulo the provided shift value against that vector. */
15441 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15442 unsigned_arg1_type, arg1);
15443 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15444 unsigned_arg1_type, unsigned_arg1,
15445 modulo_tree);
15446 /* Do the shift. */
15447 tree res
15448 = gimple_build (&stmts, RSHIFT_EXPR,
15449 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15450 /* Convert result back to the lhs type. */
15451 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15452 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15453 update_call_from_tree (gsi, res);
15454 return true;
15456 /* Vector loads. */
15457 case ALTIVEC_BUILTIN_LVX_V16QI:
15458 case ALTIVEC_BUILTIN_LVX_V8HI:
15459 case ALTIVEC_BUILTIN_LVX_V4SI:
15460 case ALTIVEC_BUILTIN_LVX_V4SF:
15461 case ALTIVEC_BUILTIN_LVX_V2DI:
15462 case ALTIVEC_BUILTIN_LVX_V2DF:
15463 case ALTIVEC_BUILTIN_LVX_V1TI:
15465 arg0 = gimple_call_arg (stmt, 0); // offset
15466 arg1 = gimple_call_arg (stmt, 1); // address
15467 lhs = gimple_call_lhs (stmt);
15468 location_t loc = gimple_location (stmt);
15469 /* Since arg1 may be cast to a different type, just use ptr_type_node
15470 here instead of trying to enforce TBAA on pointer types. */
15471 tree arg1_type = ptr_type_node;
15472 tree lhs_type = TREE_TYPE (lhs);
15473 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15474 the tree using the value from arg0. The resulting type will match
15475 the type of arg1. */
15476 gimple_seq stmts = NULL;
15477 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15478 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15479 arg1_type, arg1, temp_offset);
15480 /* Mask off any lower bits from the address. */
15481 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15482 arg1_type, temp_addr,
15483 build_int_cst (arg1_type, -16));
15484 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	if (!is_gimple_mem_ref_addr (aligned_addr))
	  {
	    tree t = make_ssa_name (TREE_TYPE (aligned_addr));
	    gimple *g = gimple_build_assign (t, aligned_addr);
	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
	    aligned_addr = t;
	  }
15492 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15493 take an offset, but since we've already incorporated the offset
15494 above, here we just pass in a zero. */
15495 gimple *g
15496 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15497 build_int_cst (arg1_type, 0)));
15498 gimple_set_location (g, loc);
15499 gsi_replace (gsi, g, true);
15500 return true;
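
      /* The replacement built above is roughly:
	   _1 = (sizetype) arg0;
	   _2 = arg1 p+ _1;
	   _3 = _2 & -16;		(the 16-byte alignment of lvx)
	   lhs = MEM[(lhs_type *)_3];  */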
15502 /* Vector stores. */
15503 case ALTIVEC_BUILTIN_STVX_V16QI:
15504 case ALTIVEC_BUILTIN_STVX_V8HI:
15505 case ALTIVEC_BUILTIN_STVX_V4SI:
15506 case ALTIVEC_BUILTIN_STVX_V4SF:
15507 case ALTIVEC_BUILTIN_STVX_V2DI:
15508 case ALTIVEC_BUILTIN_STVX_V2DF:
15510 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15511 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15512 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15513 location_t loc = gimple_location (stmt);
15514 tree arg0_type = TREE_TYPE (arg0);
15515 /* Use ptr_type_node (no TBAA) for the arg2_type.
15516 FIXME: (Richard) "A proper fix would be to transition this type as
15517 seen from the frontend to GIMPLE, for example in a similar way we
15518 do for MEM_REFs by piggy-backing that on an extra argument, a
15519 constant zero pointer of the alias pointer type to use (which would
15520 also serve as a type indicator of the store itself). I'd use a
15521 target specific internal function for this (not sure if we can have
15522 those target specific, but I guess if it's folded away then that's
15523 fine) and get away with the overload set." */
15524 tree arg2_type = ptr_type_node;
15525 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15526 the tree using the value from arg0. The resulting type will match
15527 the type of arg2. */
15528 gimple_seq stmts = NULL;
15529 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15530 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15531 arg2_type, arg2, temp_offset);
15532 /* Mask off any lower bits from the address. */
15533 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15534 arg2_type, temp_addr,
15535 build_int_cst (arg2_type, -16));
15536 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	if (!is_gimple_mem_ref_addr (aligned_addr))
	  {
	    tree t = make_ssa_name (TREE_TYPE (aligned_addr));
	    gimple *g = gimple_build_assign (t, aligned_addr);
	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
	    aligned_addr = t;
	  }
15544 /* The desired gimple result should be similar to:
15545 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15546 gimple *g
15547 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15548 build_int_cst (arg2_type, 0)), arg0);
15549 gimple_set_location (g, loc);
15550 gsi_replace (gsi, g, true);
15551 return true;
    /* Unaligned vector loads.  */
15555 case VSX_BUILTIN_LXVW4X_V16QI:
15556 case VSX_BUILTIN_LXVW4X_V8HI:
15557 case VSX_BUILTIN_LXVW4X_V4SF:
15558 case VSX_BUILTIN_LXVW4X_V4SI:
15559 case VSX_BUILTIN_LXVD2X_V2DF:
15560 case VSX_BUILTIN_LXVD2X_V2DI:
15562 arg0 = gimple_call_arg (stmt, 0); // offset
15563 arg1 = gimple_call_arg (stmt, 1); // address
15564 lhs = gimple_call_lhs (stmt);
15565 location_t loc = gimple_location (stmt);
15566 /* Since arg1 may be cast to a different type, just use ptr_type_node
15567 here instead of trying to enforce TBAA on pointer types. */
15568 tree arg1_type = ptr_type_node;
15569 tree lhs_type = TREE_TYPE (lhs);
15570 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15571 required alignment (power) is 4 bytes regardless of data type. */
15572 tree align_ltype = build_aligned_type (lhs_type, 4);
15573 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15574 the tree using the value from arg0. The resulting type will match
15575 the type of arg1. */
15576 gimple_seq stmts = NULL;
15577 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15578 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15579 arg1_type, arg1, temp_offset);
15580 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	if (!is_gimple_mem_ref_addr (temp_addr))
	  {
	    tree t = make_ssa_name (TREE_TYPE (temp_addr));
	    gimple *g = gimple_build_assign (t, temp_addr);
	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
	    temp_addr = t;
	  }
15588 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15589 take an offset, but since we've already incorporated the offset
15590 above, here we just pass in a zero. */
15591 gimple *g;
15592 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15593 build_int_cst (arg1_type, 0)));
15594 gimple_set_location (g, loc);
15595 gsi_replace (gsi, g, true);
15596 return true;
    /* Unaligned vector stores.  */
15600 case VSX_BUILTIN_STXVW4X_V16QI:
15601 case VSX_BUILTIN_STXVW4X_V8HI:
15602 case VSX_BUILTIN_STXVW4X_V4SF:
15603 case VSX_BUILTIN_STXVW4X_V4SI:
15604 case VSX_BUILTIN_STXVD2X_V2DF:
15605 case VSX_BUILTIN_STXVD2X_V2DI:
15607 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15608 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15609 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15610 location_t loc = gimple_location (stmt);
15611 tree arg0_type = TREE_TYPE (arg0);
15612 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15613 tree arg2_type = ptr_type_node;
15614 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15615 required alignment (power) is 4 bytes regardless of data type. */
15616 tree align_stype = build_aligned_type (arg0_type, 4);
15617 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15618 the tree using the value from arg1. */
15619 gimple_seq stmts = NULL;
15620 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15621 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15622 arg2_type, arg2, temp_offset);
15623 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	if (!is_gimple_mem_ref_addr (temp_addr))
	  {
	    tree t = make_ssa_name (TREE_TYPE (temp_addr));
	    gimple *g = gimple_build_assign (t, temp_addr);
	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
	    temp_addr = t;
	  }
15631 gimple *g;
15632 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15633 build_int_cst (arg2_type, 0)), arg0);
15634 gimple_set_location (g, loc);
15635 gsi_replace (gsi, g, true);
15636 return true;
15639 /* Vector Fused multiply-add (fma). */
15640 case ALTIVEC_BUILTIN_VMADDFP:
15641 case VSX_BUILTIN_XVMADDDP:
15642 case ALTIVEC_BUILTIN_VMLADDUHM:
15644 arg0 = gimple_call_arg (stmt, 0);
15645 arg1 = gimple_call_arg (stmt, 1);
15646 tree arg2 = gimple_call_arg (stmt, 2);
15647 lhs = gimple_call_lhs (stmt);
15648 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15649 gimple_call_set_lhs (g, lhs);
15650 gimple_call_set_nothrow (g, true);
15651 gimple_set_location (g, gimple_location (stmt));
15652 gsi_replace (gsi, g, true);
15653 return true;
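
      /* E.g., vec_madd (a, b, c) becomes an internal-function call,
	   lhs = .FMA (a, b, c);
	 which later expands to the fused multiply-add pattern.  */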
15656 /* Vector compares; EQ, NE, GE, GT, LE. */
15657 case ALTIVEC_BUILTIN_VCMPEQUB:
15658 case ALTIVEC_BUILTIN_VCMPEQUH:
15659 case ALTIVEC_BUILTIN_VCMPEQUW:
15660 case P8V_BUILTIN_VCMPEQUD:
15661 fold_compare_helper (gsi, EQ_EXPR, stmt);
15662 return true;
15664 case P9V_BUILTIN_CMPNEB:
15665 case P9V_BUILTIN_CMPNEH:
15666 case P9V_BUILTIN_CMPNEW:
15667 fold_compare_helper (gsi, NE_EXPR, stmt);
15668 return true;
15670 case VSX_BUILTIN_CMPGE_16QI:
15671 case VSX_BUILTIN_CMPGE_U16QI:
15672 case VSX_BUILTIN_CMPGE_8HI:
15673 case VSX_BUILTIN_CMPGE_U8HI:
15674 case VSX_BUILTIN_CMPGE_4SI:
15675 case VSX_BUILTIN_CMPGE_U4SI:
15676 case VSX_BUILTIN_CMPGE_2DI:
15677 case VSX_BUILTIN_CMPGE_U2DI:
15678 fold_compare_helper (gsi, GE_EXPR, stmt);
15679 return true;
15681 case ALTIVEC_BUILTIN_VCMPGTSB:
15682 case ALTIVEC_BUILTIN_VCMPGTUB:
15683 case ALTIVEC_BUILTIN_VCMPGTSH:
15684 case ALTIVEC_BUILTIN_VCMPGTUH:
15685 case ALTIVEC_BUILTIN_VCMPGTSW:
15686 case ALTIVEC_BUILTIN_VCMPGTUW:
15687 case P8V_BUILTIN_VCMPGTUD:
15688 case P8V_BUILTIN_VCMPGTSD:
15689 fold_compare_helper (gsi, GT_EXPR, stmt);
15690 return true;
15692 case VSX_BUILTIN_CMPLE_16QI:
15693 case VSX_BUILTIN_CMPLE_U16QI:
15694 case VSX_BUILTIN_CMPLE_8HI:
15695 case VSX_BUILTIN_CMPLE_U8HI:
15696 case VSX_BUILTIN_CMPLE_4SI:
15697 case VSX_BUILTIN_CMPLE_U4SI:
15698 case VSX_BUILTIN_CMPLE_2DI:
15699 case VSX_BUILTIN_CMPLE_U2DI:
15700 fold_compare_helper (gsi, LE_EXPR, stmt);
15701 return true;
    /* Flavors of vec_splat_[us]{8,16,32}.  */
15704 case ALTIVEC_BUILTIN_VSPLTISB:
15705 case ALTIVEC_BUILTIN_VSPLTISH:
15706 case ALTIVEC_BUILTIN_VSPLTISW:
15708 arg0 = gimple_call_arg (stmt, 0);
15709 lhs = gimple_call_lhs (stmt);
	/* Only fold the vec_splat_*() if arg0 is a 5-bit signed constant
	   in the range -16 to +15.  */
15713 if (TREE_CODE (arg0) != INTEGER_CST
15714 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15715 return false;
15716 gimple_seq stmts = NULL;
15717 location_t loc = gimple_location (stmt);
15718 tree splat_value = gimple_convert (&stmts, loc,
15719 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15720 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15721 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15722 g = gimple_build_assign (lhs, splat_tree);
15723 gimple_set_location (g, gimple_location (stmt));
15724 gsi_replace (gsi, g, true);
15725 return true;
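
      /* E.g., vec_splat_s32 (5) folds to the constant vector
	 {5, 5, 5, 5}.  */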
15728 /* Flavors of vec_splat. */
15729 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15730 case ALTIVEC_BUILTIN_VSPLTB:
15731 case ALTIVEC_BUILTIN_VSPLTH:
15732 case ALTIVEC_BUILTIN_VSPLTW:
15733 case VSX_BUILTIN_XXSPLTD_V2DI:
15734 case VSX_BUILTIN_XXSPLTD_V2DF:
15736 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15737 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
	/* Only fold vec_splat () if arg1 is both a constant value and
	   a valid index into the arg0 vector.  */
	unsigned int n_elts = VECTOR_CST_NELTS (arg0);
	if (TREE_CODE (arg1) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15743 return false;
15744 lhs = gimple_call_lhs (stmt);
15745 tree lhs_type = TREE_TYPE (lhs);
15746 tree arg0_type = TREE_TYPE (arg0);
15747 tree splat;
15748 if (TREE_CODE (arg0) == VECTOR_CST)
15749 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15750 else
15752 /* Determine (in bits) the length and start location of the
15753 splat value for a call to the tree_vec_extract helper. */
15754 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15755 * BITS_PER_UNIT / n_elts;
15756 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15757 tree len = build_int_cst (bitsizetype, splat_elem_size);
15758 tree start = build_int_cst (bitsizetype, splat_start_bit);
15759 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15760 len, start);
15762 /* And finally, build the new vector. */
15763 tree splat_tree = build_vector_from_val (lhs_type, splat);
15764 g = gimple_build_assign (lhs, splat_tree);
15765 gimple_set_location (g, gimple_location (stmt));
15766 gsi_replace (gsi, g, true);
15767 return true;
15770 /* vec_mergel (integrals). */
15771 case ALTIVEC_BUILTIN_VMRGLH:
15772 case ALTIVEC_BUILTIN_VMRGLW:
15773 case VSX_BUILTIN_XXMRGLW_4SI:
15774 case ALTIVEC_BUILTIN_VMRGLB:
15775 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15776 case VSX_BUILTIN_XXMRGLW_4SF:
15777 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15778 fold_mergehl_helper (gsi, stmt, 1);
15779 return true;
15780 /* vec_mergeh (integrals). */
15781 case ALTIVEC_BUILTIN_VMRGHH:
15782 case ALTIVEC_BUILTIN_VMRGHW:
15783 case VSX_BUILTIN_XXMRGHW_4SI:
15784 case ALTIVEC_BUILTIN_VMRGHB:
15785 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15786 case VSX_BUILTIN_XXMRGHW_4SF:
15787 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15788 fold_mergehl_helper (gsi, stmt, 0);
15789 return true;
15791 /* Flavors of vec_mergee. */
15792 case P8V_BUILTIN_VMRGEW_V4SI:
15793 case P8V_BUILTIN_VMRGEW_V2DI:
15794 case P8V_BUILTIN_VMRGEW_V4SF:
15795 case P8V_BUILTIN_VMRGEW_V2DF:
15796 fold_mergeeo_helper (gsi, stmt, 0);
15797 return true;
15798 /* Flavors of vec_mergeo. */
15799 case P8V_BUILTIN_VMRGOW_V4SI:
15800 case P8V_BUILTIN_VMRGOW_V2DI:
15801 case P8V_BUILTIN_VMRGOW_V4SF:
15802 case P8V_BUILTIN_VMRGOW_V2DF:
15803 fold_mergeeo_helper (gsi, stmt, 1);
15804 return true;
15806 /* d = vec_pack (a, b) */
15807 case P8V_BUILTIN_VPKUDUM:
15808 case ALTIVEC_BUILTIN_VPKUHUM:
15809 case ALTIVEC_BUILTIN_VPKUWUM:
15811 arg0 = gimple_call_arg (stmt, 0);
15812 arg1 = gimple_call_arg (stmt, 1);
15813 lhs = gimple_call_lhs (stmt);
15814 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15815 gimple_set_location (g, gimple_location (stmt));
15816 gsi_replace (gsi, g, true);
15817 return true;
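
      /* E.g., VEC_PACK_TRUNC_EXPR of two V2DI operands produces a V4SI
	 whose lanes are the truncated low halves of the four inputs.  */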
15820 /* d = vec_unpackh (a) */
15821 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
15822 in this code is sensitive to endian-ness, and needs to be inverted to
15823 handle both LE and BE targets. */
15824 case ALTIVEC_BUILTIN_VUPKHSB:
15825 case ALTIVEC_BUILTIN_VUPKHSH:
15826 case P8V_BUILTIN_VUPKHSW:
15828 arg0 = gimple_call_arg (stmt, 0);
15829 lhs = gimple_call_lhs (stmt);
15830 if (BYTES_BIG_ENDIAN)
15831 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15832 else
15833 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15834 gimple_set_location (g, gimple_location (stmt));
15835 gsi_replace (gsi, g, true);
15836 return true;
15838 /* d = vec_unpackl (a) */
15839 case ALTIVEC_BUILTIN_VUPKLSB:
15840 case ALTIVEC_BUILTIN_VUPKLSH:
15841 case P8V_BUILTIN_VUPKLSW:
15843 arg0 = gimple_call_arg (stmt, 0);
15844 lhs = gimple_call_lhs (stmt);
15845 if (BYTES_BIG_ENDIAN)
15846 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15847 else
15848 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15849 gimple_set_location (g, gimple_location (stmt));
15850 gsi_replace (gsi, g, true);
15851 return true;
    /* There is no gimple type corresponding to pixel, so just return.  */
15854 case ALTIVEC_BUILTIN_VUPKHPX:
15855 case ALTIVEC_BUILTIN_VUPKLPX:
15856 return false;
15858 /* vec_perm. */
15859 case ALTIVEC_BUILTIN_VPERM_16QI:
15860 case ALTIVEC_BUILTIN_VPERM_8HI:
15861 case ALTIVEC_BUILTIN_VPERM_4SI:
15862 case ALTIVEC_BUILTIN_VPERM_2DI:
15863 case ALTIVEC_BUILTIN_VPERM_4SF:
15864 case ALTIVEC_BUILTIN_VPERM_2DF:
15866 arg0 = gimple_call_arg (stmt, 0);
15867 arg1 = gimple_call_arg (stmt, 1);
15868 tree permute = gimple_call_arg (stmt, 2);
15869 lhs = gimple_call_lhs (stmt);
15870 location_t loc = gimple_location (stmt);
15871 gimple_seq stmts = NULL;
	/* Convert arg0 and arg1 to match the type of the permute
	   for the VEC_PERM_EXPR operation.  */
15874 tree permute_type = (TREE_TYPE (permute));
15875 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15876 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15877 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15878 permute_type, arg0_ptype, arg1_ptype,
15879 permute);
	/* Convert the result back to the desired lhs type upon completion.  */
15881 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15882 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15883 g = gimple_build_assign (lhs, temp);
15884 gimple_set_location (g, loc);
15885 gsi_replace (gsi, g, true);
15886 return true;
15889 default:
15890 if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
15892 fn_code, fn_name1, fn_name2);
15893 break;
15896 return false;
15899 /* Expand an expression EXP that calls a built-in function,
15900 with result going to TARGET if that's convenient
15901 (and in mode MODE if that's convenient).
15902 SUBTARGET may be used as the target for computing one of EXP's operands.
15903 IGNORE is nonzero if the value is to be ignored. */
15905 static rtx
15906 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15907 machine_mode mode ATTRIBUTE_UNUSED,
15908 int ignore ATTRIBUTE_UNUSED)
15910 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15911 enum rs6000_builtins fcode
15912 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15913 size_t uns_fcode = (size_t)fcode;
15914 const struct builtin_description *d;
15915 size_t i;
15916 rtx ret;
15917 bool success;
15918 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15919 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15920 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15922 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
15923 floating point type, depending on whether long double is the IBM extended
15924 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
15925 we only define one variant of the built-in function, and switch the code
15926 when defining it, rather than defining two built-ins and using the
15927 overload table in rs6000-c.c to switch between the two. If we don't have
15928 the proper assembler, don't do this switch because CODE_FOR_*kf* and
15929 CODE_FOR_*tf* will be CODE_FOR_nothing. */
15930 if (FLOAT128_IEEE_P (TFmode))
15931 switch (icode)
15933 default:
15934 break;
15936 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
15937 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
15938 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
15939 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
15940 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
15941 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
15942 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
15943 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
15944 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
15945 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
15946 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
15947 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
15948 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
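
  /* E.g., with -mabi=ieeelongdouble, CODE_FOR_sqrtkf2_odd is remapped to
     CODE_FOR_sqrttf2_odd above, so the IEEE 128-bit builtin expands in
     TFmode rather than KFmode.  */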
15951 if (TARGET_DEBUG_BUILTIN)
15953 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15954 const char *name2 = (icode != CODE_FOR_nothing)
15955 ? get_insn_name ((int) icode)
15956 : "nothing";
15957 const char *name3;
15959 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15961 default: name3 = "unknown"; break;
15962 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15963 case RS6000_BTC_UNARY: name3 = "unary"; break;
15964 case RS6000_BTC_BINARY: name3 = "binary"; break;
15965 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15966 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15967 case RS6000_BTC_ABS: name3 = "abs"; break;
15968 case RS6000_BTC_DST: name3 = "dst"; break;
15972 fprintf (stderr,
15973 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15974 (name1) ? name1 : "---", fcode,
15975 (name2) ? name2 : "---", (int) icode,
15976 name3,
15977 func_valid_p ? "" : ", not valid");
  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }
15988 switch (fcode)
15990 case RS6000_BUILTIN_RECIP:
15991 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
15993 case RS6000_BUILTIN_RECIPF:
15994 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
15996 case RS6000_BUILTIN_RSQRTF:
15997 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
15999 case RS6000_BUILTIN_RSQRT:
16000 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16002 case POWER7_BUILTIN_BPERMD:
16003 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16004 ? CODE_FOR_bpermd_di
16005 : CODE_FOR_bpermd_si), exp, target);
16007 case RS6000_BUILTIN_GET_TB:
16008 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16009 target);
16011 case RS6000_BUILTIN_MFTB:
16012 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16013 ? CODE_FOR_rs6000_mftb_di
16014 : CODE_FOR_rs6000_mftb_si),
16015 target);
16017 case RS6000_BUILTIN_MFFS:
16018 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16020 case RS6000_BUILTIN_MTFSB0:
16021 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16023 case RS6000_BUILTIN_MTFSB1:
16024 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16026 case RS6000_BUILTIN_SET_FPSCR_RN:
16027 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16028 exp);
16030 case RS6000_BUILTIN_SET_FPSCR_DRN:
16031 return
16032 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16033 exp);
16035 case RS6000_BUILTIN_MFFSL:
16036 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16038 case RS6000_BUILTIN_MTFSF:
16039 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16041 case RS6000_BUILTIN_CPU_INIT:
16042 case RS6000_BUILTIN_CPU_IS:
16043 case RS6000_BUILTIN_CPU_SUPPORTS:
16044 return cpu_expand_builtin (fcode, exp, target);
    case MISC_BUILTIN_SPEC_BARRIER:
      {
	emit_insn (gen_speculation_barrier ());
	return NULL_RTX;
      }
16052 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16053 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16055 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16056 : (int) CODE_FOR_altivec_lvsl_direct);
16057 machine_mode tmode = insn_data[icode2].operand[0].mode;
16058 machine_mode mode = insn_data[icode2].operand[1].mode;
16059 tree arg;
16060 rtx op, addr, pat;
16062 gcc_assert (TARGET_ALTIVEC);
16064 arg = CALL_EXPR_ARG (exp, 0);
16065 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16066 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16067 addr = memory_address (mode, op);
16068 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16069 op = addr;
16070 else
16072 /* For the load case we need to negate the address. */
16073 op = gen_reg_rtx (GET_MODE (addr));
16074 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16076 op = gen_rtx_MEM (mode, op);
16078 if (target == 0
16079 || GET_MODE (target) != tmode
16080 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16081 target = gen_reg_rtx (tmode);
16083 pat = GEN_FCN (icode2) (target, op);
16084 if (!pat)
16085 return 0;
16086 emit_insn (pat);
16088 return target;
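      /* Illustrative sketch: the expansion above is conceptually

	     mask = lvsr (-addr)   on big-endian
	     mask = lvsl (-addr)   on little-endian

	 where the address is negated only for the mask-for-load case; the
	 mask-for-store case uses the address directly.  (lvsl/lvsr name the
	 underlying instructions; altivec.h exposes them as vec_lvsl and
	 vec_lvsr.)  */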
16091 case ALTIVEC_BUILTIN_VCFUX:
16092 case ALTIVEC_BUILTIN_VCFSX:
16093 case ALTIVEC_BUILTIN_VCTUXS:
16094 case ALTIVEC_BUILTIN_VCTSXS:
16095 /* FIXME: There's got to be a nicer way to handle this case than
16096 constructing a new CALL_EXPR. */
16097 if (call_expr_nargs (exp) == 1)
16099 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16100 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16102 break;
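      /* Illustrative sketch: after the rewrite above, a one-argument call
	 such as

	     vector float f = __builtin_altivec_vcfsx (v);

	 expands exactly as if it had been written with an explicit zero
	 scale factor:

	     vector float f = __builtin_altivec_vcfsx (v, 0);  */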
16104 /* For the pack and unpack int128 routines, fix up the builtin so it
16105 uses the correct IBM128 type. */
16106 case MISC_BUILTIN_PACK_IF:
16107 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16109 icode = CODE_FOR_packtf;
16110 fcode = MISC_BUILTIN_PACK_TF;
16111 uns_fcode = (size_t)fcode;
16113 break;
16115 case MISC_BUILTIN_UNPACK_IF:
16116 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16118 icode = CODE_FOR_unpacktf;
16119 fcode = MISC_BUILTIN_UNPACK_TF;
16120 uns_fcode = (size_t)fcode;
16122 break;
16124 default:
16125 break;
16128 if (TARGET_ALTIVEC)
16130 ret = altivec_expand_builtin (exp, target, &success);
16132 if (success)
16133 return ret;
16135 if (TARGET_HTM)
16137 ret = htm_expand_builtin (exp, target, &success);
16139 if (success)
16140 return ret;
16143 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16144 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16145 gcc_assert (attr == RS6000_BTC_UNARY
16146 || attr == RS6000_BTC_BINARY
16147 || attr == RS6000_BTC_TERNARY
16148 || attr == RS6000_BTC_SPECIAL);
16150 /* Handle simple unary operations. */
16151 d = bdesc_1arg;
16152 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16153 if (d->code == fcode)
16154 return rs6000_expand_unop_builtin (icode, exp, target);
16156 /* Handle simple binary operations. */
16157 d = bdesc_2arg;
16158 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16159 if (d->code == fcode)
16160 return rs6000_expand_binop_builtin (icode, exp, target);
16162 /* Handle simple ternary operations. */
16163 d = bdesc_3arg;
16164 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16165 if (d->code == fcode)
16166 return rs6000_expand_ternop_builtin (icode, exp, target);
16168 /* Handle simple no-argument operations. */
16169 d = bdesc_0arg;
16170 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16171 if (d->code == fcode)
16172 return rs6000_expand_zeroop_builtin (icode, target);
16174 gcc_unreachable ();
16177 /* Create a builtin vector type with a name, taking care not to give
16178 the canonical type a name. */
16180 static tree
16181 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16183 tree result = build_vector_type (elt_type, num_elts);
16185 /* Copy so we don't give the canonical type a name. */
16186 result = build_variant_type_copy (result);
16188 add_builtin_type (name, result);
16190 return result;
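/* Illustrative use, as in rs6000_init_builtins below:

       V4SI_type_node = rs6000_vector_type ("__vector signed int",
					    intSI_type_node, 4);

   The named variant copy is what users and the debugger see, while the
   canonical build_vector_type result stays anonymous so that type
   identity checks keep working.  */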
16193 static void
16194 rs6000_init_builtins (void)
16196 tree tdecl;
16197 tree ftype;
16198 machine_mode mode;
16200 if (TARGET_DEBUG_BUILTIN)
16201 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16202 (TARGET_ALTIVEC) ? ", altivec" : "",
16203 (TARGET_VSX) ? ", vsx" : "");
16205 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16206 : "__vector long long",
16207 intDI_type_node, 2);
16208 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16209 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16210 intSI_type_node, 4);
16211 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16212 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16213 intHI_type_node, 8);
16214 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16215 intQI_type_node, 16);
16217 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16218 unsigned_intQI_type_node, 16);
16219 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16220 unsigned_intHI_type_node, 8);
16221 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16222 unsigned_intSI_type_node, 4);
16223 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16224 ? "__vector unsigned long"
16225 : "__vector unsigned long long",
16226 unsigned_intDI_type_node, 2);
16228 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16230 const_str_type_node
16231 = build_pointer_type (build_qualified_type (char_type_node,
16232 TYPE_QUAL_CONST));
16234 /* We use V1TI mode as a special container to hold __int128_t items that
16235 must live in VSX registers. */
16236 if (intTI_type_node)
16238 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16239 intTI_type_node, 1);
16240 unsigned_V1TI_type_node
16241 = rs6000_vector_type ("__vector unsigned __int128",
16242 unsigned_intTI_type_node, 1);
16245 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16246 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16247 'vector unsigned short'. */
16249 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16250 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16251 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16252 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16253 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16255 long_integer_type_internal_node = long_integer_type_node;
16256 long_unsigned_type_internal_node = long_unsigned_type_node;
16257 long_long_integer_type_internal_node = long_long_integer_type_node;
16258 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16259 intQI_type_internal_node = intQI_type_node;
16260 uintQI_type_internal_node = unsigned_intQI_type_node;
16261 intHI_type_internal_node = intHI_type_node;
16262 uintHI_type_internal_node = unsigned_intHI_type_node;
16263 intSI_type_internal_node = intSI_type_node;
16264 uintSI_type_internal_node = unsigned_intSI_type_node;
16265 intDI_type_internal_node = intDI_type_node;
16266 uintDI_type_internal_node = unsigned_intDI_type_node;
16267 intTI_type_internal_node = intTI_type_node;
16268 uintTI_type_internal_node = unsigned_intTI_type_node;
16269 float_type_internal_node = float_type_node;
16270 double_type_internal_node = double_type_node;
16271 long_double_type_internal_node = long_double_type_node;
16272 dfloat64_type_internal_node = dfloat64_type_node;
16273 dfloat128_type_internal_node = dfloat128_type_node;
16274 void_type_internal_node = void_type_node;
16276 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16277 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16278 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16279 format that uses a pair of doubles, depending on the switches and
16280 defaults.
16282 If we don't support either 128-bit IBM double double or IEEE 128-bit
16283 floating point, we need to make sure the type is non-zero or else self-test
16284 fails during bootstrap.
16286 Always create __ibm128 as a separate type, even if the current long double
16287 format is IBM extended double.
16289 For IEEE 128-bit floating point, always create the type __ieee128. If the
16290 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16291 __ieee128. */
16292 if (TARGET_FLOAT128_TYPE)
16294 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16295 ibm128_float_type_node = long_double_type_node;
16296 else
16298 ibm128_float_type_node = make_node (REAL_TYPE);
16299 TYPE_PRECISION (ibm128_float_type_node) = 128;
16300 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16301 layout_type (ibm128_float_type_node);
16304 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16305 "__ibm128");
16307 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16308 ieee128_float_type_node = long_double_type_node;
16309 else
16310 ieee128_float_type_node = float128_type_node;
16312 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16313 "__ieee128");
16316 else
16317 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
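  /* Illustrative sketch (assumes -mfloat128 for the 'q' literal suffix):
     with the registrations above, user code can name both 128-bit formats
     independently of the current long double format:

	 __ibm128  d = 1.0;    IBM extended double (pair of doubles)
	 __ieee128 q = 1.0q;   IEEE 128-bit binary float  */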
16319 /* Initialize the modes for builtin_function_type, mapping each machine
16320 mode to the corresponding tree type node. */
16321 builtin_mode_to_type[QImode][0] = integer_type_node;
16322 builtin_mode_to_type[HImode][0] = integer_type_node;
16323 builtin_mode_to_type[SImode][0] = intSI_type_node;
16324 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16325 builtin_mode_to_type[DImode][0] = intDI_type_node;
16326 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16327 builtin_mode_to_type[TImode][0] = intTI_type_node;
16328 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16329 builtin_mode_to_type[SFmode][0] = float_type_node;
16330 builtin_mode_to_type[DFmode][0] = double_type_node;
16331 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16332 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16333 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16334 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16335 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16336 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16337 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16338 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16339 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16340 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16341 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16342 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16343 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16344 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16345 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16346 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16347 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16349 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16350 TYPE_NAME (bool_char_type_node) = tdecl;
16352 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16353 TYPE_NAME (bool_short_type_node) = tdecl;
16355 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16356 TYPE_NAME (bool_int_type_node) = tdecl;
16358 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16359 TYPE_NAME (pixel_type_node) = tdecl;
16361 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16362 bool_char_type_node, 16);
16363 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16364 bool_short_type_node, 8);
16365 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16366 bool_int_type_node, 4);
16367 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16368 ? "__vector __bool long"
16369 : "__vector __bool long long",
16370 bool_long_long_type_node, 2);
16371 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16372 pixel_type_node, 8);
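  /* Illustrative sketch: keeping these copies distinct is what lets C++
     overload resolution and name mangling tell the types apart, e.g.

	 vector bool int m = vec_cmpgt (a, b);

     produces a 'vector bool int' that is not interchangeable with
     'vector unsigned int', even though both are V4SImode.  */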
16374 /* Create Altivec and VSX builtins on machines with at least the
16375 general purpose extensions (970 and newer) to allow the use of
16376 the target attribute. */
16377 if (TARGET_EXTRA_BUILTINS)
16378 altivec_init_builtins ();
16379 if (TARGET_HTM)
16380 htm_init_builtins ();
16382 if (TARGET_EXTRA_BUILTINS)
16383 rs6000_common_init_builtins ();
16385 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16386 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16387 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16389 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16390 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16391 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16393 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16394 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16395 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16397 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16398 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16399 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16401 mode = (TARGET_64BIT) ? DImode : SImode;
16402 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16403 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16404 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16406 ftype = build_function_type_list (unsigned_intDI_type_node,
16407 NULL_TREE);
16408 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16410 if (TARGET_64BIT)
16411 ftype = build_function_type_list (unsigned_intDI_type_node,
16412 NULL_TREE);
16413 else
16414 ftype = build_function_type_list (unsigned_intSI_type_node,
16415 NULL_TREE);
16416 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16418 ftype = build_function_type_list (double_type_node, NULL_TREE);
16419 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16421 ftype = build_function_type_list (double_type_node, NULL_TREE);
16422 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16424 ftype = build_function_type_list (void_type_node,
16425 intSI_type_node,
16426 NULL_TREE);
16427 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16429 ftype = build_function_type_list (void_type_node,
16430 intSI_type_node,
16431 NULL_TREE);
16432 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16434 ftype = build_function_type_list (void_type_node,
16435 intDI_type_node,
16436 NULL_TREE);
16437 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16439 ftype = build_function_type_list (void_type_node,
16440 intDI_type_node,
16441 NULL_TREE);
16442 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16444 ftype = build_function_type_list (void_type_node,
16445 intSI_type_node, double_type_node,
16446 NULL_TREE);
16447 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16449 ftype = build_function_type_list (void_type_node, NULL_TREE);
16450 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16451 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16452 MISC_BUILTIN_SPEC_BARRIER);
16454 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16455 NULL_TREE);
16456 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16457 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16459 /* AIX libm provides clog as __clog. */
16460 if (TARGET_XCOFF &&
16461 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16462 set_user_assembler_name (tdecl, "__clog");
16464 #ifdef SUBTARGET_INIT_BUILTINS
16465 SUBTARGET_INIT_BUILTINS;
16466 #endif
16469 /* Returns the rs6000 builtin decl for CODE. */
16471 static tree
16472 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16474 HOST_WIDE_INT fnmask;
16476 if (code >= RS6000_BUILTIN_COUNT)
16477 return error_mark_node;
16479 fnmask = rs6000_builtin_info[code].mask;
16480 if ((fnmask & rs6000_builtin_mask) != fnmask)
16482 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16483 return error_mark_node;
16486 return rs6000_builtin_decls[code];
16489 static void
16490 altivec_init_builtins (void)
16492 const struct builtin_description *d;
16493 size_t i;
16494 tree ftype;
16495 tree decl;
16496 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16498 tree pvoid_type_node = build_pointer_type (void_type_node);
16500 tree pcvoid_type_node
16501 = build_pointer_type (build_qualified_type (void_type_node,
16502 TYPE_QUAL_CONST));
16504 tree int_ftype_opaque
16505 = build_function_type_list (integer_type_node,
16506 opaque_V4SI_type_node, NULL_TREE);
16507 tree opaque_ftype_opaque
16508 = build_function_type_list (integer_type_node, NULL_TREE);
16509 tree opaque_ftype_opaque_int
16510 = build_function_type_list (opaque_V4SI_type_node,
16511 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16512 tree opaque_ftype_opaque_opaque_int
16513 = build_function_type_list (opaque_V4SI_type_node,
16514 opaque_V4SI_type_node, opaque_V4SI_type_node,
16515 integer_type_node, NULL_TREE);
16516 tree opaque_ftype_opaque_opaque_opaque
16517 = build_function_type_list (opaque_V4SI_type_node,
16518 opaque_V4SI_type_node, opaque_V4SI_type_node,
16519 opaque_V4SI_type_node, NULL_TREE);
16520 tree opaque_ftype_opaque_opaque
16521 = build_function_type_list (opaque_V4SI_type_node,
16522 opaque_V4SI_type_node, opaque_V4SI_type_node,
16523 NULL_TREE);
16524 tree int_ftype_int_opaque_opaque
16525 = build_function_type_list (integer_type_node,
16526 integer_type_node, opaque_V4SI_type_node,
16527 opaque_V4SI_type_node, NULL_TREE);
16528 tree int_ftype_int_v4si_v4si
16529 = build_function_type_list (integer_type_node,
16530 integer_type_node, V4SI_type_node,
16531 V4SI_type_node, NULL_TREE);
16532 tree int_ftype_int_v2di_v2di
16533 = build_function_type_list (integer_type_node,
16534 integer_type_node, V2DI_type_node,
16535 V2DI_type_node, NULL_TREE);
16536 tree void_ftype_v4si
16537 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16538 tree v8hi_ftype_void
16539 = build_function_type_list (V8HI_type_node, NULL_TREE);
16540 tree void_ftype_void
16541 = build_function_type_list (void_type_node, NULL_TREE);
16542 tree void_ftype_int
16543 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16545 tree opaque_ftype_long_pcvoid
16546 = build_function_type_list (opaque_V4SI_type_node,
16547 long_integer_type_node, pcvoid_type_node,
16548 NULL_TREE);
16549 tree v16qi_ftype_long_pcvoid
16550 = build_function_type_list (V16QI_type_node,
16551 long_integer_type_node, pcvoid_type_node,
16552 NULL_TREE);
16553 tree v8hi_ftype_long_pcvoid
16554 = build_function_type_list (V8HI_type_node,
16555 long_integer_type_node, pcvoid_type_node,
16556 NULL_TREE);
16557 tree v4si_ftype_long_pcvoid
16558 = build_function_type_list (V4SI_type_node,
16559 long_integer_type_node, pcvoid_type_node,
16560 NULL_TREE);
16561 tree v4sf_ftype_long_pcvoid
16562 = build_function_type_list (V4SF_type_node,
16563 long_integer_type_node, pcvoid_type_node,
16564 NULL_TREE);
16565 tree v2df_ftype_long_pcvoid
16566 = build_function_type_list (V2DF_type_node,
16567 long_integer_type_node, pcvoid_type_node,
16568 NULL_TREE);
16569 tree v2di_ftype_long_pcvoid
16570 = build_function_type_list (V2DI_type_node,
16571 long_integer_type_node, pcvoid_type_node,
16572 NULL_TREE);
16573 tree v1ti_ftype_long_pcvoid
16574 = build_function_type_list (V1TI_type_node,
16575 long_integer_type_node, pcvoid_type_node,
16576 NULL_TREE);
16578 tree void_ftype_opaque_long_pvoid
16579 = build_function_type_list (void_type_node,
16580 opaque_V4SI_type_node, long_integer_type_node,
16581 pvoid_type_node, NULL_TREE);
16582 tree void_ftype_v4si_long_pvoid
16583 = build_function_type_list (void_type_node,
16584 V4SI_type_node, long_integer_type_node,
16585 pvoid_type_node, NULL_TREE);
16586 tree void_ftype_v16qi_long_pvoid
16587 = build_function_type_list (void_type_node,
16588 V16QI_type_node, long_integer_type_node,
16589 pvoid_type_node, NULL_TREE);
16591 tree void_ftype_v16qi_pvoid_long
16592 = build_function_type_list (void_type_node,
16593 V16QI_type_node, pvoid_type_node,
16594 long_integer_type_node, NULL_TREE);
16596 tree void_ftype_v8hi_long_pvoid
16597 = build_function_type_list (void_type_node,
16598 V8HI_type_node, long_integer_type_node,
16599 pvoid_type_node, NULL_TREE);
16600 tree void_ftype_v4sf_long_pvoid
16601 = build_function_type_list (void_type_node,
16602 V4SF_type_node, long_integer_type_node,
16603 pvoid_type_node, NULL_TREE);
16604 tree void_ftype_v2df_long_pvoid
16605 = build_function_type_list (void_type_node,
16606 V2DF_type_node, long_integer_type_node,
16607 pvoid_type_node, NULL_TREE);
16608 tree void_ftype_v1ti_long_pvoid
16609 = build_function_type_list (void_type_node,
16610 V1TI_type_node, long_integer_type_node,
16611 pvoid_type_node, NULL_TREE);
16612 tree void_ftype_v2di_long_pvoid
16613 = build_function_type_list (void_type_node,
16614 V2DI_type_node, long_integer_type_node,
16615 pvoid_type_node, NULL_TREE);
16616 tree int_ftype_int_v8hi_v8hi
16617 = build_function_type_list (integer_type_node,
16618 integer_type_node, V8HI_type_node,
16619 V8HI_type_node, NULL_TREE);
16620 tree int_ftype_int_v16qi_v16qi
16621 = build_function_type_list (integer_type_node,
16622 integer_type_node, V16QI_type_node,
16623 V16QI_type_node, NULL_TREE);
16624 tree int_ftype_int_v4sf_v4sf
16625 = build_function_type_list (integer_type_node,
16626 integer_type_node, V4SF_type_node,
16627 V4SF_type_node, NULL_TREE);
16628 tree int_ftype_int_v2df_v2df
16629 = build_function_type_list (integer_type_node,
16630 integer_type_node, V2DF_type_node,
16631 V2DF_type_node, NULL_TREE);
16632 tree v2di_ftype_v2di
16633 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16634 tree v4si_ftype_v4si
16635 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16636 tree v8hi_ftype_v8hi
16637 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16638 tree v16qi_ftype_v16qi
16639 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16640 tree v4sf_ftype_v4sf
16641 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16642 tree v2df_ftype_v2df
16643 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16644 tree void_ftype_pcvoid_int_int
16645 = build_function_type_list (void_type_node,
16646 pcvoid_type_node, integer_type_node,
16647 integer_type_node, NULL_TREE);
16649 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16650 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16651 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16652 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16653 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16654 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16655 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16656 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16657 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16658 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16659 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16660 ALTIVEC_BUILTIN_LVXL_V2DF);
16661 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16662 ALTIVEC_BUILTIN_LVXL_V2DI);
16663 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16664 ALTIVEC_BUILTIN_LVXL_V4SF);
16665 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16666 ALTIVEC_BUILTIN_LVXL_V4SI);
16667 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16668 ALTIVEC_BUILTIN_LVXL_V8HI);
16669 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16670 ALTIVEC_BUILTIN_LVXL_V16QI);
16671 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16672 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16673 ALTIVEC_BUILTIN_LVX_V1TI);
16674 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16675 ALTIVEC_BUILTIN_LVX_V2DF);
16676 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16677 ALTIVEC_BUILTIN_LVX_V2DI);
16678 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16679 ALTIVEC_BUILTIN_LVX_V4SF);
16680 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16681 ALTIVEC_BUILTIN_LVX_V4SI);
16682 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16683 ALTIVEC_BUILTIN_LVX_V8HI);
16684 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16685 ALTIVEC_BUILTIN_LVX_V16QI);
16686 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16687 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16688 ALTIVEC_BUILTIN_STVX_V2DF);
16689 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16690 ALTIVEC_BUILTIN_STVX_V2DI);
16691 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16692 ALTIVEC_BUILTIN_STVX_V4SF);
16693 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16694 ALTIVEC_BUILTIN_STVX_V4SI);
16695 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16696 ALTIVEC_BUILTIN_STVX_V8HI);
16697 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16698 ALTIVEC_BUILTIN_STVX_V16QI);
16699 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16700 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16701 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16702 ALTIVEC_BUILTIN_STVXL_V2DF);
16703 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16704 ALTIVEC_BUILTIN_STVXL_V2DI);
16705 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16706 ALTIVEC_BUILTIN_STVXL_V4SF);
16707 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16708 ALTIVEC_BUILTIN_STVXL_V4SI);
16709 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16710 ALTIVEC_BUILTIN_STVXL_V8HI);
16711 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16712 ALTIVEC_BUILTIN_STVXL_V16QI);
16713 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16714 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16715 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16716 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16717 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16718 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16719 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16720 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16721 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16722 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16723 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16724 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16725 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16726 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16727 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16728 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16730 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16731 VSX_BUILTIN_LXVD2X_V2DF);
16732 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16733 VSX_BUILTIN_LXVD2X_V2DI);
16734 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16735 VSX_BUILTIN_LXVW4X_V4SF);
16736 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16737 VSX_BUILTIN_LXVW4X_V4SI);
16738 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16739 VSX_BUILTIN_LXVW4X_V8HI);
16740 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16741 VSX_BUILTIN_LXVW4X_V16QI);
16742 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16743 VSX_BUILTIN_STXVD2X_V2DF);
16744 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16745 VSX_BUILTIN_STXVD2X_V2DI);
16746 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16747 VSX_BUILTIN_STXVW4X_V4SF);
16748 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16749 VSX_BUILTIN_STXVW4X_V4SI);
16750 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16751 VSX_BUILTIN_STXVW4X_V8HI);
16752 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16753 VSX_BUILTIN_STXVW4X_V16QI);
16755 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16756 VSX_BUILTIN_LD_ELEMREV_V2DF);
16757 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16758 VSX_BUILTIN_LD_ELEMREV_V2DI);
16759 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16760 VSX_BUILTIN_LD_ELEMREV_V4SF);
16761 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16762 VSX_BUILTIN_LD_ELEMREV_V4SI);
16763 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16764 VSX_BUILTIN_LD_ELEMREV_V8HI);
16765 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16766 VSX_BUILTIN_LD_ELEMREV_V16QI);
16767 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16768 VSX_BUILTIN_ST_ELEMREV_V2DF);
16769 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16770 VSX_BUILTIN_ST_ELEMREV_V1TI);
16771 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16772 VSX_BUILTIN_ST_ELEMREV_V2DI);
16773 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16774 VSX_BUILTIN_ST_ELEMREV_V4SF);
16775 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16776 VSX_BUILTIN_ST_ELEMREV_V4SI);
16777 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16778 VSX_BUILTIN_ST_ELEMREV_V8HI);
16779 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16780 VSX_BUILTIN_ST_ELEMREV_V16QI);
16782 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16783 VSX_BUILTIN_VEC_LD);
16784 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16785 VSX_BUILTIN_VEC_ST);
16786 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16787 VSX_BUILTIN_VEC_XL);
16788 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16789 VSX_BUILTIN_VEC_XL_BE);
16790 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16791 VSX_BUILTIN_VEC_XST);
16792 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16793 VSX_BUILTIN_VEC_XST_BE);
16795 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16796 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16797 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16799 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16800 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16801 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16802 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16803 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16804 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16805 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16806 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16807 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16808 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16809 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16810 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16812 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16813 ALTIVEC_BUILTIN_VEC_ADDE);
16814 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16815 ALTIVEC_BUILTIN_VEC_ADDEC);
16816 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16817 ALTIVEC_BUILTIN_VEC_CMPNE);
16818 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16819 ALTIVEC_BUILTIN_VEC_MUL);
16820 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16821 ALTIVEC_BUILTIN_VEC_SUBE);
16822 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16823 ALTIVEC_BUILTIN_VEC_SUBEC);
16825 /* Cell builtins. */
16826 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16827 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16828 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16829 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16831 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16832 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16833 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16834 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16836 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16837 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16838 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16839 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16841 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16842 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16843 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16844 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16846 if (TARGET_P9_VECTOR)
16848 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16849 P9V_BUILTIN_STXVL);
16850 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16851 P9V_BUILTIN_XST_LEN_R);
16854 /* Add the DST variants. */
16855 d = bdesc_dst;
16856 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16858 HOST_WIDE_INT mask = d->mask;
16860 /* It is expected that these dst built-in functions may have
16861 d->icode equal to CODE_FOR_nothing. */
16862 if ((mask & builtin_mask) != mask)
16864 if (TARGET_DEBUG_BUILTIN)
16865 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16866 d->name);
16867 continue;
16869 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16872 /* Initialize the predicates. */
16873 d = bdesc_altivec_preds;
16874 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16876 machine_mode mode1;
16877 tree type;
16878 HOST_WIDE_INT mask = d->mask;
16880 if ((mask & builtin_mask) != mask)
16882 if (TARGET_DEBUG_BUILTIN)
16883 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16884 d->name);
16885 continue;
16888 if (rs6000_overloaded_builtin_p (d->code))
16889 mode1 = VOIDmode;
16890 else
16892 /* Cannot define builtin if the instruction is disabled. */
16893 gcc_assert (d->icode != CODE_FOR_nothing);
16894 mode1 = insn_data[d->icode].operand[1].mode;
16897 switch (mode1)
16899 case E_VOIDmode:
16900 type = int_ftype_int_opaque_opaque;
16901 break;
16902 case E_V2DImode:
16903 type = int_ftype_int_v2di_v2di;
16904 break;
16905 case E_V4SImode:
16906 type = int_ftype_int_v4si_v4si;
16907 break;
16908 case E_V8HImode:
16909 type = int_ftype_int_v8hi_v8hi;
16910 break;
16911 case E_V16QImode:
16912 type = int_ftype_int_v16qi_v16qi;
16913 break;
16914 case E_V4SFmode:
16915 type = int_ftype_int_v4sf_v4sf;
16916 break;
16917 case E_V2DFmode:
16918 type = int_ftype_int_v2df_v2df;
16919 break;
16920 default:
16921 gcc_unreachable ();
16924 def_builtin (d->name, type, d->code);
16927 /* Initialize the abs* operators. */
16928 d = bdesc_abs;
16929 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16931 machine_mode mode0;
16932 tree type;
16933 HOST_WIDE_INT mask = d->mask;
16935 if ((mask & builtin_mask) != mask)
16937 if (TARGET_DEBUG_BUILTIN)
16938 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
16939 d->name);
16940 continue;
16943 /* Cannot define builtin if the instruction is disabled. */
16944 gcc_assert (d->icode != CODE_FOR_nothing);
16945 mode0 = insn_data[d->icode].operand[0].mode;
16947 switch (mode0)
16949 case E_V2DImode:
16950 type = v2di_ftype_v2di;
16951 break;
16952 case E_V4SImode:
16953 type = v4si_ftype_v4si;
16954 break;
16955 case E_V8HImode:
16956 type = v8hi_ftype_v8hi;
16957 break;
16958 case E_V16QImode:
16959 type = v16qi_ftype_v16qi;
16960 break;
16961 case E_V4SFmode:
16962 type = v4sf_ftype_v4sf;
16963 break;
16964 case E_V2DFmode:
16965 type = v2df_ftype_v2df;
16966 break;
16967 default:
16968 gcc_unreachable ();
16971 def_builtin (d->name, type, d->code);
16974 /* Initialize the target builtin that implements
16975 targetm.vectorize.builtin_mask_for_load. */
16977 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16978 v16qi_ftype_long_pcvoid,
16979 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16980 BUILT_IN_MD, NULL, NULL_TREE);
16981 TREE_READONLY (decl) = 1;
16982 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16983 altivec_builtin_mask_for_load = decl;
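  /* Illustrative sketch: the vectorizer retrieves this decl through
     targetm.vectorize.builtin_mask_for_load and emits, conceptually,

	 mask      = __builtin_altivec_mask_for_load (0, addr);
	 realigned = vec_perm (lo_part, hi_part, mask);

     to reassemble a misaligned vector from two aligned loads.  */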
16985 /* Access to the vec_init patterns. */
16986 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16987 integer_type_node, integer_type_node,
16988 integer_type_node, NULL_TREE);
16989 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16991 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
16992 short_integer_type_node,
16993 short_integer_type_node,
16994 short_integer_type_node,
16995 short_integer_type_node,
16996 short_integer_type_node,
16997 short_integer_type_node,
16998 short_integer_type_node, NULL_TREE);
16999 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17001 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17002 char_type_node, char_type_node,
17003 char_type_node, char_type_node,
17004 char_type_node, char_type_node,
17005 char_type_node, char_type_node,
17006 char_type_node, char_type_node,
17007 char_type_node, char_type_node,
17008 char_type_node, char_type_node,
17009 char_type_node, NULL_TREE);
17010 def_builtin ("__builtin_vec_init_v16qi", ftype,
17011 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17013 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17014 float_type_node, float_type_node,
17015 float_type_node, NULL_TREE);
17016 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17018 /* VSX builtins. */
17019 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17020 double_type_node, NULL_TREE);
17021 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17023 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17024 intDI_type_node, NULL_TREE);
17025 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17027 /* Access to the vec_set patterns. */
17028 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17029 intSI_type_node,
17030 integer_type_node, NULL_TREE);
17031 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17033 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17034 intHI_type_node,
17035 integer_type_node, NULL_TREE);
17036 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17038 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17039 intQI_type_node,
17040 integer_type_node, NULL_TREE);
17041 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17043 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17044 float_type_node,
17045 integer_type_node, NULL_TREE);
17046 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17048 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17049 double_type_node,
17050 integer_type_node, NULL_TREE);
17051 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17053 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17054 intDI_type_node,
17055 integer_type_node, NULL_TREE);
17056 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17058 /* Access to the vec_extract patterns. */
17059 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17060 integer_type_node, NULL_TREE);
17061 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17063 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17064 integer_type_node, NULL_TREE);
17065 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17067 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17068 integer_type_node, NULL_TREE);
17069 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17071 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17072 integer_type_node, NULL_TREE);
17073 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17075 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17076 integer_type_node, NULL_TREE);
17077 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17079 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17080 integer_type_node, NULL_TREE);
17081 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17084 if (V1TI_type_node)
17086 tree v1ti_ftype_long_pcvoid
17087 = build_function_type_list (V1TI_type_node,
17088 long_integer_type_node, pcvoid_type_node,
17089 NULL_TREE);
17090 tree void_ftype_v1ti_long_pvoid
17091 = build_function_type_list (void_type_node,
17092 V1TI_type_node, long_integer_type_node,
17093 pvoid_type_node, NULL_TREE);
17094 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17095 VSX_BUILTIN_LD_ELEMREV_V1TI);
17096 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17097 VSX_BUILTIN_LXVD2X_V1TI);
17098 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17099 VSX_BUILTIN_STXVD2X_V1TI);
17100 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17101 NULL_TREE);
17102 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17103 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17104 intTI_type_node,
17105 integer_type_node, NULL_TREE);
17106 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17107 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17108 integer_type_node, NULL_TREE);
17109 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17114 static void
17115 htm_init_builtins (void)
17117 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17118 const struct builtin_description *d;
17119 size_t i;
17121 d = bdesc_htm;
17122 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17124 tree op[MAX_HTM_OPERANDS], type;
17125 HOST_WIDE_INT mask = d->mask;
17126 unsigned attr = rs6000_builtin_info[d->code].attr;
17127 bool void_func = (attr & RS6000_BTC_VOID);
17128 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17129 int nopnds = 0;
17130 tree gpr_type_node;
17131 tree rettype;
17132 tree argtype;
17134 /* It is expected that these htm built-in functions may have
17135 d->icode equal to CODE_FOR_nothing. */
17137 if (TARGET_32BIT && TARGET_POWERPC64)
17138 gpr_type_node = long_long_unsigned_type_node;
17139 else
17140 gpr_type_node = long_unsigned_type_node;
17142 if (attr & RS6000_BTC_SPR)
17144 rettype = gpr_type_node;
17145 argtype = gpr_type_node;
17147 else if (d->code == HTM_BUILTIN_TABORTDC
17148 || d->code == HTM_BUILTIN_TABORTDCI)
17150 rettype = unsigned_type_node;
17151 argtype = gpr_type_node;
17153 else
17155 rettype = unsigned_type_node;
17156 argtype = unsigned_type_node;
17159 if ((mask & builtin_mask) != mask)
17161 if (TARGET_DEBUG_BUILTIN)
17162 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17163 continue;
17166 if (d->name == 0)
17168 if (TARGET_DEBUG_BUILTIN)
17169 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17170 (long unsigned) i);
17171 continue;
17174 op[nopnds++] = (void_func) ? void_type_node : rettype;
17176 if (attr_args == RS6000_BTC_UNARY)
17177 op[nopnds++] = argtype;
17178 else if (attr_args == RS6000_BTC_BINARY)
17180 op[nopnds++] = argtype;
17181 op[nopnds++] = argtype;
17183 else if (attr_args == RS6000_BTC_TERNARY)
17185 op[nopnds++] = argtype;
17186 op[nopnds++] = argtype;
17187 op[nopnds++] = argtype;
17190 switch (nopnds)
17192 case 1:
17193 type = build_function_type_list (op[0], NULL_TREE);
17194 break;
17195 case 2:
17196 type = build_function_type_list (op[0], op[1], NULL_TREE);
17197 break;
17198 case 3:
17199 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17200 break;
17201 case 4:
17202 type = build_function_type_list (op[0], op[1], op[2], op[3],
17203 NULL_TREE);
17204 break;
17205 default:
17206 gcc_unreachable ();
17209 def_builtin (d->name, type, d->code);
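      /* Illustrative sketch (assumes -mhtm and the tbegin/tend entries in
	 bdesc_htm); the usual transaction idiom from the GCC manual:

	     if (__builtin_tbegin (0))
	       {
		 ... transactional code ...
		 __builtin_tend (0);
	       }
	     else
	       handle_abort ();  */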
17213 /* Hash function for builtin functions with up to 3 arguments and a return
17214 type. */
17215 hashval_t
17216 builtin_hasher::hash (builtin_hash_struct *bh)
17218 unsigned ret = 0;
17219 int i;
17221 for (i = 0; i < 4; i++)
17223 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17224 ret = (ret * 2) + bh->uns_p[i];
17227 return ret;
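/* Illustrative expansion of the loop above: with M = MAX_MACHINE_MODE,

       ret = ((((mode[0] * 2 + uns_p[0]) * M + mode[1]) * 2 + uns_p[1])
	      * M + ...)

   i.e. a mixed-radix encoding of the four (mode, uns_p) pairs, so every
   distinct tuple hashes to a distinct value up to unsigned wraparound.  */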
17230 /* Compare builtin hash entries H1 and H2 for equivalence. */
17231 bool
17232 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17234 return ((p1->mode[0] == p2->mode[0])
17235 && (p1->mode[1] == p2->mode[1])
17236 && (p1->mode[2] == p2->mode[2])
17237 && (p1->mode[3] == p2->mode[3])
17238 && (p1->uns_p[0] == p2->uns_p[0])
17239 && (p1->uns_p[1] == p2->uns_p[1])
17240 && (p1->uns_p[2] == p2->uns_p[2])
17241 && (p1->uns_p[3] == p2->uns_p[3]));
17244 /* Map types for builtin functions with an explicit return type and up to 3
17245 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17246 of the unused arguments. */
17247 static tree
17248 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17249 machine_mode mode_arg1, machine_mode mode_arg2,
17250 enum rs6000_builtins builtin, const char *name)
17252 struct builtin_hash_struct h;
17253 struct builtin_hash_struct *h2;
17254 int num_args = 3;
17255 int i;
17256 tree ret_type = NULL_TREE;
17257 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17259 /* Create builtin_hash_table. */
17260 if (builtin_hash_table == NULL)
17261 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17263 h.type = NULL_TREE;
17264 h.mode[0] = mode_ret;
17265 h.mode[1] = mode_arg0;
17266 h.mode[2] = mode_arg1;
17267 h.mode[3] = mode_arg2;
17268 h.uns_p[0] = 0;
17269 h.uns_p[1] = 0;
17270 h.uns_p[2] = 0;
17271 h.uns_p[3] = 0;
17273 /* If the builtin produces unsigned results or takes unsigned
17274 arguments, and it is returned as a decl for the vectorizer (such as
17275 widening multiplies, permute), make sure the arguments and return value
17276 are correctly typed. */
17277 switch (builtin)
17279 /* unsigned 1 argument functions. */
17280 case CRYPTO_BUILTIN_VSBOX:
17281 case CRYPTO_BUILTIN_VSBOX_BE:
17282 case P8V_BUILTIN_VGBBD:
17283 case MISC_BUILTIN_CDTBCD:
17284 case MISC_BUILTIN_CBCDTD:
17285 h.uns_p[0] = 1;
17286 h.uns_p[1] = 1;
17287 break;
17289 /* unsigned 2 argument functions. */
17290 case ALTIVEC_BUILTIN_VMULEUB:
17291 case ALTIVEC_BUILTIN_VMULEUH:
17292 case P8V_BUILTIN_VMULEUW:
17293 case ALTIVEC_BUILTIN_VMULOUB:
17294 case ALTIVEC_BUILTIN_VMULOUH:
17295 case P8V_BUILTIN_VMULOUW:
17296 case CRYPTO_BUILTIN_VCIPHER:
17297 case CRYPTO_BUILTIN_VCIPHER_BE:
17298 case CRYPTO_BUILTIN_VCIPHERLAST:
17299 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17300 case CRYPTO_BUILTIN_VNCIPHER:
17301 case CRYPTO_BUILTIN_VNCIPHER_BE:
17302 case CRYPTO_BUILTIN_VNCIPHERLAST:
17303 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17304 case CRYPTO_BUILTIN_VPMSUMB:
17305 case CRYPTO_BUILTIN_VPMSUMH:
17306 case CRYPTO_BUILTIN_VPMSUMW:
17307 case CRYPTO_BUILTIN_VPMSUMD:
17308 case CRYPTO_BUILTIN_VPMSUM:
17309 case MISC_BUILTIN_ADDG6S:
17310 case MISC_BUILTIN_DIVWEU:
17311 case MISC_BUILTIN_DIVDEU:
17312 case VSX_BUILTIN_UDIV_V2DI:
17313 case ALTIVEC_BUILTIN_VMAXUB:
17314 case ALTIVEC_BUILTIN_VMINUB:
17315 case ALTIVEC_BUILTIN_VMAXUH:
17316 case ALTIVEC_BUILTIN_VMINUH:
17317 case ALTIVEC_BUILTIN_VMAXUW:
17318 case ALTIVEC_BUILTIN_VMINUW:
17319 case P8V_BUILTIN_VMAXUD:
17320 case P8V_BUILTIN_VMINUD:
17321 h.uns_p[0] = 1;
17322 h.uns_p[1] = 1;
17323 h.uns_p[2] = 1;
17324 break;
17326 /* unsigned 3 argument functions. */
17327 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17328 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17329 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17330 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17331 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17332 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17333 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17334 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17335 case VSX_BUILTIN_VPERM_16QI_UNS:
17336 case VSX_BUILTIN_VPERM_8HI_UNS:
17337 case VSX_BUILTIN_VPERM_4SI_UNS:
17338 case VSX_BUILTIN_VPERM_2DI_UNS:
17339 case VSX_BUILTIN_XXSEL_16QI_UNS:
17340 case VSX_BUILTIN_XXSEL_8HI_UNS:
17341 case VSX_BUILTIN_XXSEL_4SI_UNS:
17342 case VSX_BUILTIN_XXSEL_2DI_UNS:
17343 case CRYPTO_BUILTIN_VPERMXOR:
17344 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17345 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17346 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17347 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17348 case CRYPTO_BUILTIN_VSHASIGMAW:
17349 case CRYPTO_BUILTIN_VSHASIGMAD:
17350 case CRYPTO_BUILTIN_VSHASIGMA:
17351 h.uns_p[0] = 1;
17352 h.uns_p[1] = 1;
17353 h.uns_p[2] = 1;
17354 h.uns_p[3] = 1;
17355 break;
17357 /* signed permute functions with unsigned char mask. */
17358 case ALTIVEC_BUILTIN_VPERM_16QI:
17359 case ALTIVEC_BUILTIN_VPERM_8HI:
17360 case ALTIVEC_BUILTIN_VPERM_4SI:
17361 case ALTIVEC_BUILTIN_VPERM_4SF:
17362 case ALTIVEC_BUILTIN_VPERM_2DI:
17363 case ALTIVEC_BUILTIN_VPERM_2DF:
17364 case VSX_BUILTIN_VPERM_16QI:
17365 case VSX_BUILTIN_VPERM_8HI:
17366 case VSX_BUILTIN_VPERM_4SI:
17367 case VSX_BUILTIN_VPERM_4SF:
17368 case VSX_BUILTIN_VPERM_2DI:
17369 case VSX_BUILTIN_VPERM_2DF:
17370 h.uns_p[3] = 1;
17371 break;
17373 /* unsigned args, signed return. */
17374 case VSX_BUILTIN_XVCVUXDSP:
17375 case VSX_BUILTIN_XVCVUXDDP_UNS:
17376 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17377 h.uns_p[1] = 1;
17378 break;
17380 /* signed args, unsigned return. */
17381 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17382 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17383 case MISC_BUILTIN_UNPACK_TD:
17384 case MISC_BUILTIN_UNPACK_V1TI:
17385 h.uns_p[0] = 1;
17386 break;
17388 /* unsigned arguments, bool return (compares). */
17389 case ALTIVEC_BUILTIN_VCMPEQUB:
17390 case ALTIVEC_BUILTIN_VCMPEQUH:
17391 case ALTIVEC_BUILTIN_VCMPEQUW:
17392 case P8V_BUILTIN_VCMPEQUD:
17393 case VSX_BUILTIN_CMPGE_U16QI:
17394 case VSX_BUILTIN_CMPGE_U8HI:
17395 case VSX_BUILTIN_CMPGE_U4SI:
17396 case VSX_BUILTIN_CMPGE_U2DI:
17397 case ALTIVEC_BUILTIN_VCMPGTUB:
17398 case ALTIVEC_BUILTIN_VCMPGTUH:
17399 case ALTIVEC_BUILTIN_VCMPGTUW:
17400 case P8V_BUILTIN_VCMPGTUD:
17401 h.uns_p[1] = 1;
17402 h.uns_p[2] = 1;
17403 break;
17405 /* unsigned arguments for 128-bit pack instructions. */
17406 case MISC_BUILTIN_PACK_TD:
17407 case MISC_BUILTIN_PACK_V1TI:
17408 h.uns_p[1] = 1;
17409 h.uns_p[2] = 1;
17410 break;
17412 /* unsigned second arguments (vector shift right). */
17413 case ALTIVEC_BUILTIN_VSRB:
17414 case ALTIVEC_BUILTIN_VSRH:
17415 case ALTIVEC_BUILTIN_VSRW:
17416 case P8V_BUILTIN_VSRD:
17417 h.uns_p[2] = 1;
17418 break;
17420 default:
17421 break;
17424 /* Figure out how many args are present. */
17425 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17426 num_args--;
17428 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17429 if (!ret_type && h.uns_p[0])
17430 ret_type = builtin_mode_to_type[h.mode[0]][0];
17432 if (!ret_type)
17433 fatal_error (input_location,
17434 "internal error: builtin function %qs had an unexpected "
17435 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17437 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17438 arg_type[i] = NULL_TREE;
17440 for (i = 0; i < num_args; i++)
17442 int m = (int) h.mode[i+1];
17443 int uns_p = h.uns_p[i+1];
17445 arg_type[i] = builtin_mode_to_type[m][uns_p];
17446 if (!arg_type[i] && uns_p)
17447 arg_type[i] = builtin_mode_to_type[m][0];
17449 if (!arg_type[i])
17450 fatal_error (input_location,
17451 "internal error: builtin function %qs, argument %d "
17452 "had unexpected argument type %qs", name, i,
17453 GET_MODE_NAME (m));
17456 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17457 if (*found == NULL)
17459 h2 = ggc_alloc<builtin_hash_struct> ();
17460 *h2 = h;
17461 *found = h2;
17463 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17464 arg_type[2], NULL_TREE);
17467 return (*found)->type;
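/* Illustrative use, as called from rs6000_init_builtins:

       ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				      RS6000_BUILTIN_RECIP,
				      "__builtin_recipdiv");

   The trailing VOIDmode is trimmed by the num_args loop, yielding the
   type double (double, double).  */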
17470 static void
17471 rs6000_common_init_builtins (void)
17473 const struct builtin_description *d;
17474 size_t i;
17476 tree opaque_ftype_opaque = NULL_TREE;
17477 tree opaque_ftype_opaque_opaque = NULL_TREE;
17478 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17479 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17481 /* Create Altivec and VSX builtins on machines with at least the
17482 general purpose extensions (970 and newer) to allow the use of
17483 the target attribute. */
17485 if (TARGET_EXTRA_BUILTINS)
17486 builtin_mask |= RS6000_BTM_COMMON;
17488 /* Add the ternary operators. */
17489 d = bdesc_3arg;
17490 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17492 tree type;
17493 HOST_WIDE_INT mask = d->mask;
17495 if ((mask & builtin_mask) != mask)
17497 if (TARGET_DEBUG_BUILTIN)
17498 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17499 continue;
17502 if (rs6000_overloaded_builtin_p (d->code))
17504 if (! (type = opaque_ftype_opaque_opaque_opaque))
17505 type = opaque_ftype_opaque_opaque_opaque
17506 = build_function_type_list (opaque_V4SI_type_node,
17507 opaque_V4SI_type_node,
17508 opaque_V4SI_type_node,
17509 opaque_V4SI_type_node,
17510 NULL_TREE);
17512 else
17514 enum insn_code icode = d->icode;
17515 if (d->name == 0)
17517 if (TARGET_DEBUG_BUILTIN)
17518 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17519 (long unsigned)i);
17521 continue;
17524 if (icode == CODE_FOR_nothing)
17526 if (TARGET_DEBUG_BUILTIN)
17527 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17528 d->name);
17530 continue;
17533 type = builtin_function_type (insn_data[icode].operand[0].mode,
17534 insn_data[icode].operand[1].mode,
17535 insn_data[icode].operand[2].mode,
17536 insn_data[icode].operand[3].mode,
17537 d->code, d->name);
17540 def_builtin (d->name, type, d->code);
17543 /* Add the binary operators. */
17544 d = bdesc_2arg;
17545 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17547 machine_mode mode0, mode1, mode2;
17548 tree type;
17549 HOST_WIDE_INT mask = d->mask;
17551 if ((mask & builtin_mask) != mask)
17553 if (TARGET_DEBUG_BUILTIN)
17554 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17555 continue;
17558 if (rs6000_overloaded_builtin_p (d->code))
17560 if (! (type = opaque_ftype_opaque_opaque))
17561 type = opaque_ftype_opaque_opaque
17562 = build_function_type_list (opaque_V4SI_type_node,
17563 opaque_V4SI_type_node,
17564 opaque_V4SI_type_node,
17565 NULL_TREE);
17567 else
17569 enum insn_code icode = d->icode;
17570 if (d->name == 0)
17572 if (TARGET_DEBUG_BUILTIN)
17573 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17574 (long unsigned)i);
17576 continue;
17579 if (icode == CODE_FOR_nothing)
17581 if (TARGET_DEBUG_BUILTIN)
17582 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17583 d->name);
17585 continue;
17588 mode0 = insn_data[icode].operand[0].mode;
17589 mode1 = insn_data[icode].operand[1].mode;
17590 mode2 = insn_data[icode].operand[2].mode;
17592 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17593 d->code, d->name);
17596 def_builtin (d->name, type, d->code);
17599 /* Add the simple unary operators. */
17600 d = bdesc_1arg;
17601 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17603 machine_mode mode0, mode1;
17604 tree type;
17605 HOST_WIDE_INT mask = d->mask;
17607 if ((mask & builtin_mask) != mask)
17609 if (TARGET_DEBUG_BUILTIN)
17610 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17611 continue;
17614 if (rs6000_overloaded_builtin_p (d->code))
17616 if (! (type = opaque_ftype_opaque))
17617 type = opaque_ftype_opaque
17618 = build_function_type_list (opaque_V4SI_type_node,
17619 opaque_V4SI_type_node,
17620 NULL_TREE);
17622 else
17624 enum insn_code icode = d->icode;
17625 if (d->name == 0)
17627 if (TARGET_DEBUG_BUILTIN)
17628 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17629 (long unsigned)i);
17631 continue;
17634 if (icode == CODE_FOR_nothing)
17636 if (TARGET_DEBUG_BUILTIN)
17637 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17638 d->name);
17640 continue;
17643 mode0 = insn_data[icode].operand[0].mode;
17644 mode1 = insn_data[icode].operand[1].mode;
17646 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17647 d->code, d->name);
17650 def_builtin (d->name, type, d->code);
17653 /* Add the simple no-argument operators. */
17654 d = bdesc_0arg;
17655 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17657 machine_mode mode0;
17658 tree type;
17659 HOST_WIDE_INT mask = d->mask;
17661 if ((mask & builtin_mask) != mask)
17663 if (TARGET_DEBUG_BUILTIN)
17664 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17665 continue;
17667 if (rs6000_overloaded_builtin_p (d->code))
17669 if (!opaque_ftype_opaque)
17670 opaque_ftype_opaque
17671 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17672 type = opaque_ftype_opaque;
17674 else
17676 enum insn_code icode = d->icode;
17677 if (d->name == 0)
17679 if (TARGET_DEBUG_BUILTIN)
17680 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17681 (long unsigned) i);
17682 continue;
17684 if (icode == CODE_FOR_nothing)
17686 if (TARGET_DEBUG_BUILTIN)
17687 fprintf (stderr,
17688 "rs6000_builtin, skip no-argument %s (no code)\n",
17689 d->name);
17690 continue;
17692 mode0 = insn_data[icode].operand[0].mode;
17693 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17694 d->code, d->name);
17696 def_builtin (d->name, type, d->code);
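/* All four loops above share one pattern: entries whose mask bits are not
   all enabled in BUILTIN_MASK are skipped, overloaded builtins get an
   opaque V4SI-based signature so that every overload shares a single decl,
   and all other entries derive their signature from the modes of the
   insn's operands.  For instance, a bdesc_3arg entry whose insn operates
   on V4SFmode throughout (e.g. __builtin_altivec_vmaddfp) ends up
   registered with the type V4SF (V4SF, V4SF, V4SF).  */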
17700 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17701 static void
17702 init_float128_ibm (machine_mode mode)
17704 if (!TARGET_XL_COMPAT)
17706 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17707 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17708 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17709 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17711 if (!TARGET_HARD_FLOAT)
17713 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17714 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17715 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17716 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17717 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17718 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17719 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17720 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17722 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17723 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17724 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17725 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17726 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17727 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17728 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17729 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17732 else
17734 set_optab_libfunc (add_optab, mode, "_xlqadd");
17735 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17736 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17737 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17740 /* Add various conversions for IFmode to use the traditional TFmode
17741 names. */
17742 if (mode == IFmode)
17744 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17745 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17746 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17747 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17748 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17749 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17751 if (TARGET_POWERPC64)
17753 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17754 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17755 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17756 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
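/* The practical effect of the mappings above: with IBM extended double and
   !TARGET_XL_COMPAT, a source-level operation such as

       long double a, b, c;
       c = a * b;

   becomes a call to __gcc_qmul, while -mxl-compat selects the historical
   _xlqmul entry point instead.  */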
17761 /* Create a decl for either complex long double multiply or complex long double
17762 divide when long double is IEEE 128-bit floating point. We can't use
17763 __multc3 and __divtc3 because those names are already taken by the original
17764 long double format based on IBM extended double. The complex multiply/divide
17765 functions are encoded as builtin functions with a complex result and 4 scalar inputs. */
17767 static void
17768 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17770 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17771 name, NULL_TREE);
17773 set_builtin_decl (fncode, fndecl, true);
17775 if (TARGET_DEBUG_BUILTIN)
17776 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17778 return;
17781 /* Set up IEEE 128-bit floating point routines. Use different names if the
17782 arguments can be passed in a vector register. The historical PowerPC
17783 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17784 continue to use that if we aren't using vector registers to pass IEEE
17785 128-bit floating point. */
17787 static void
17788 init_float128_ieee (machine_mode mode)
17790 if (FLOAT128_VECTOR_P (mode))
17792 static bool complex_muldiv_init_p = false;
17794 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17795 we have clone or target attributes, this will be called a second
17796 time. We want to create the built-in function only once. */
17797 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17799 complex_muldiv_init_p = true;
17800 built_in_function fncode_mul =
17801 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17802 - MIN_MODE_COMPLEX_FLOAT);
17803 built_in_function fncode_div =
17804 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17805 - MIN_MODE_COMPLEX_FLOAT);
17807 tree fntype = build_function_type_list (complex_long_double_type_node,
17808 long_double_type_node,
17809 long_double_type_node,
17810 long_double_type_node,
17811 long_double_type_node,
17812 NULL_TREE);
17814 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17815 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17818 set_optab_libfunc (add_optab, mode, "__addkf3");
17819 set_optab_libfunc (sub_optab, mode, "__subkf3");
17820 set_optab_libfunc (neg_optab, mode, "__negkf2");
17821 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17822 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17823 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17824 set_optab_libfunc (abs_optab, mode, "__abskf2");
17825 set_optab_libfunc (powi_optab, mode, "__powikf2");
17827 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17828 set_optab_libfunc (ne_optab, mode, "__nekf2");
17829 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17830 set_optab_libfunc (ge_optab, mode, "__gekf2");
17831 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17832 set_optab_libfunc (le_optab, mode, "__lekf2");
17833 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17835 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17836 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17837 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17838 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17840 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17841 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17842 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17844 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17845 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17846 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17848 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17849 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17850 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17851 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17852 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17853 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17855 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17856 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17857 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17858 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17860 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17861 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17862 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17863 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17865 if (TARGET_POWERPC64)
17867 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17868 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17869 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17870 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17874 else
17876 set_optab_libfunc (add_optab, mode, "_q_add");
17877 set_optab_libfunc (sub_optab, mode, "_q_sub");
17878 set_optab_libfunc (neg_optab, mode, "_q_neg");
17879 set_optab_libfunc (smul_optab, mode, "_q_mul");
17880 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17881 if (TARGET_PPC_GPOPT)
17882 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17884 set_optab_libfunc (eq_optab, mode, "_q_feq");
17885 set_optab_libfunc (ne_optab, mode, "_q_fne");
17886 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17887 set_optab_libfunc (ge_optab, mode, "_q_fge");
17888 set_optab_libfunc (lt_optab, mode, "_q_flt");
17889 set_optab_libfunc (le_optab, mode, "_q_fle");
17891 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17892 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17893 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17894 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17895 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17896 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17897 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17898 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17902 static void
17903 rs6000_init_libfuncs (void)
17905 /* __float128 support. */
17906 if (TARGET_FLOAT128_TYPE)
17908 init_float128_ibm (IFmode);
17909 init_float128_ieee (KFmode);
17912 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17913 if (TARGET_LONG_DOUBLE_128)
17915 if (!TARGET_IEEEQUAD)
17916 init_float128_ibm (TFmode);
17918 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17919 else
17920 init_float128_ieee (TFmode);
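/* To illustrate the naming above: when IEEE 128-bit values are passed in
   vector registers, __float128 (KFmode) arithmetic uses the __*kf* entry
   points such as __addkf3 and __mulkf3; under -mabi=ieeelongdouble TFmode
   shares them, and complex long double multiply/divide map to __mulkc3 and
   __divkc3 because __multc3/__divtc3 already belong to IBM extended
   double.  Without vector-register passing, the historical _q_add, _q_mul,
   etc. are used instead.  */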
17924 /* Emit a potentially record-form instruction, setting DST from SRC.
17925 If DOT is 0, that is all; otherwise, set CCREG to the result of the
17926 signed comparison of DST with zero. If DOT is 1, the generated RTL
17927 doesn't care about the DST result; if DOT is 2, it does. If CCREG
17928 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
17929 a separate COMPARE. */
17931 void
17932 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
17934 if (dot == 0)
17936 emit_move_insn (dst, src);
17937 return;
17940 if (cc_reg_not_cr0_operand (ccreg, CCmode))
17942 emit_move_insn (dst, src);
17943 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
17944 return;
17947 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
17948 if (dot == 1)
17950 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
17951 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
17953 else
17955 rtx set = gen_rtx_SET (dst, src);
17956 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
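/* For reference, the RTL shapes generated above are:

       dot = 0:  (set dst src)
       dot = 1:  (parallel [(set ccreg (compare src 0)) (clobber dst)])
       dot = 2:  (parallel [(set ccreg (compare src 0)) (set dst src)])

   and when CCREG is not CR0 the operation is instead split into a plain
   (set dst src) followed by a separate compare of DST with zero.  */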
17961 /* A validation routine: say whether CODE, a condition code, and MODE
17962 match. The other alternatives either don't make sense or should
17963 never be generated. */
17965 void
17966 validate_condition_mode (enum rtx_code code, machine_mode mode)
17968 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
17969 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
17970 && GET_MODE_CLASS (mode) == MODE_CC);
17972 /* These don't make sense. */
17973 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
17974 || mode != CCUNSmode);
17976 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
17977 || mode == CCUNSmode);
17979 gcc_assert (mode == CCFPmode
17980 || (code != ORDERED && code != UNORDERED
17981 && code != UNEQ && code != LTGT
17982 && code != UNGT && code != UNLT
17983 && code != UNGE && code != UNLE));
17985 /* These should never be generated except for
17986 flag_finite_math_only. */
17987 gcc_assert (mode != CCFPmode
17988 || flag_finite_math_only
17989 || (code != LE && code != GE
17990 && code != UNEQ && code != LTGT
17991 && code != UNGT && code != UNLT));
17993 /* These are invalid; the information is not there. */
17994 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
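/* For example, (gtu ...) is only valid with CCUNSmode, (gt ...) is never
   valid with CCUNSmode, and CCEQmode accepts nothing but EQ and NE.  */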
17998 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
17999 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E
18000 is non-null, store there the bit offset (counted from the right) where
18001 the single stretch of 1 bits begins; and similarly for B, the bit
18002 offset where it ends. */
18004 bool
18005 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18007 unsigned HOST_WIDE_INT val = INTVAL (mask);
18008 unsigned HOST_WIDE_INT bit;
18009 int nb, ne;
18010 int n = GET_MODE_PRECISION (mode);
18012 if (mode != DImode && mode != SImode)
18013 return false;
18015 if (INTVAL (mask) >= 0)
18017 bit = val & -val;
18018 ne = exact_log2 (bit);
18019 nb = exact_log2 (val + bit);
18021 else if (val + 1 == 0)
18023 nb = n;
18024 ne = 0;
18026 else if (val & 1)
18028 val = ~val;
18029 bit = val & -val;
18030 nb = exact_log2 (bit);
18031 ne = exact_log2 (val + bit);
18033 else
18035 bit = val & -val;
18036 ne = exact_log2 (bit);
18037 if (val + bit == 0)
18038 nb = n;
18039 else
18040 nb = 0;
18043 nb--;
18045 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18046 return false;
18048 if (b)
18049 *b = nb;
18050 if (e)
18051 *e = ne;
18053 return true;
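/* Two worked DImode examples of the logic above:

       MASK = 0x0000000000000ff0: a single stretch of ones in bits 4..11,
       so *E = 4 (where the run begins) and *B = 11 (where it ends).

       MASK = 0xffff00000000ffff: the ones wrap around, running from bit 48
       up through bit 63 and on to bit 15, so *E = 48 and *B = 15.

   A value with two separate runs, such as 0x00ff00ff, is rejected because
   exact_log2 fails on the non-power-of-2 intermediate value.  */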
18056 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18057 or rldicr instruction, to implement an AND with it in mode MODE. */
18059 bool
18060 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18062 int nb, ne;
18064 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18065 return false;
18067 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18068 does not wrap. */
18069 if (mode == DImode)
18070 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18072 /* For SImode, rlwinm can do everything. */
18073 if (mode == SImode)
18074 return (nb < 32 && ne < 32);
18076 return false;
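/* E.g., in DImode, 0x00000000ffffff00 (*B = 31, *E = 8) is accepted, since
   a non-wrapping rlwinm mask can produce it, while the single stretch
   0x0000fffff0000000 (*B = 47, *E = 28) is rejected: it neither touches
   bit 0 or bit 63 (ruling out rldicl/rldicr) nor fits in the low 32
   bits.  */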
18079 /* Return the instruction template for an AND with mask in mode MODE, with
18080 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18082 const char *
18083 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18085 int nb, ne;
18087 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18088 gcc_unreachable ();
18090 if (mode == DImode && ne == 0)
18092 operands[3] = GEN_INT (63 - nb);
18093 if (dot)
18094 return "rldicl. %0,%1,0,%3";
18095 return "rldicl %0,%1,0,%3";
18098 if (mode == DImode && nb == 63)
18100 operands[3] = GEN_INT (63 - ne);
18101 if (dot)
18102 return "rldicr. %0,%1,0,%3";
18103 return "rldicr %0,%1,0,%3";
18106 if (nb < 32 && ne < 32)
18108 operands[3] = GEN_INT (31 - nb);
18109 operands[4] = GEN_INT (31 - ne);
18110 if (dot)
18111 return "rlwinm. %0,%1,0,%3,%4";
18112 return "rlwinm %0,%1,0,%3,%4";
18115 gcc_unreachable ();
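/* E.g., an AND with 0xff in DImode (NE = 0) yields "rldicl %0,%1,0,56", an
   AND with 0xffffffff00000000 (NB = 63) yields "rldicr %0,%1,0,31", and an
   AND with 0x7ffffff8 (NB = 30, NE = 3) yields "rlwinm %0,%1,0,1,28".  */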
18118 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18119 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18120 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18122 bool
18123 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18125 int nb, ne;
18127 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18128 return false;
18130 int n = GET_MODE_PRECISION (mode);
18131 int sh = -1;
18133 if (CONST_INT_P (XEXP (shift, 1)))
18135 sh = INTVAL (XEXP (shift, 1));
18136 if (sh < 0 || sh >= n)
18137 return false;
18140 rtx_code code = GET_CODE (shift);
18142 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18143 if (sh == 0)
18144 code = ROTATE;
18146 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18147 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18148 code = ASHIFT;
18149 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18151 code = LSHIFTRT;
18152 sh = n - sh;
18155 /* DImode rotates need rld*. */
18156 if (mode == DImode && code == ROTATE)
18157 return (nb == 63 || ne == 0 || ne == sh);
18159 /* SImode rotates need rlw*. */
18160 if (mode == SImode && code == ROTATE)
18161 return (nb < 32 && ne < 32 && sh < 32);
18163 /* Wrap-around masks are only okay for rotates. */
18164 if (ne > nb)
18165 return false;
18167 /* Variable shifts are only okay for rotates. */
18168 if (sh < 0)
18169 return false;
18171 /* Don't allow ASHIFT if the mask is wrong for that. */
18172 if (code == ASHIFT && ne < sh)
18173 return false;
18175 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18176 if the mask is wrong for that. */
18177 if (nb < 32 && ne < 32 && sh < 32
18178 && !(code == LSHIFTRT && nb >= 32 - sh))
18179 return true;
18181 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18182 if the mask is wrong for that. */
18183 if (code == LSHIFTRT)
18184 sh = 64 - sh;
18185 if (nb == 63 || ne == 0 || ne == sh)
18186 return !(code == LSHIFTRT && nb >= sh);
18188 return false;
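/* E.g., in SImode, (ashift X 8) with mask 0xffffff00 (NB = 31, NE = 8) is
   accepted -- the combination is exactly the effect of one rlwinm -- while
   the same mask with (ashift X 16) is rejected, since NE < SH means the
   mask keeps bit positions that the ashift must have cleared.  */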
18191 /* Return the instruction template for a shift with mask in mode MODE, with
18192 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18194 const char *
18195 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18197 int nb, ne;
18199 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18200 gcc_unreachable ();
18202 if (mode == DImode && ne == 0)
18204 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18205 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18206 operands[3] = GEN_INT (63 - nb);
18207 if (dot)
18208 return "rld%I2cl. %0,%1,%2,%3";
18209 return "rld%I2cl %0,%1,%2,%3";
18212 if (mode == DImode && nb == 63)
18214 operands[3] = GEN_INT (63 - ne);
18215 if (dot)
18216 return "rld%I2cr. %0,%1,%2,%3";
18217 return "rld%I2cr %0,%1,%2,%3";
18220 if (mode == DImode
18221 && GET_CODE (operands[4]) != LSHIFTRT
18222 && CONST_INT_P (operands[2])
18223 && ne == INTVAL (operands[2]))
18225 operands[3] = GEN_INT (63 - nb);
18226 if (dot)
18227 return "rld%I2c. %0,%1,%2,%3";
18228 return "rld%I2c %0,%1,%2,%3";
18231 if (nb < 32 && ne < 32)
18233 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18234 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18235 operands[3] = GEN_INT (31 - nb);
18236 operands[4] = GEN_INT (31 - ne);
18237 /* This insn can also be a 64-bit rotate with mask that really makes
18238 it just a shift right (with mask); the %h below are to adjust for
18239 that situation (shift count is >= 32 in that case). */
18240 if (dot)
18241 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18242 return "rlw%I2nm %0,%1,%h2,%3,%4";
18245 gcc_unreachable ();
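/* E.g., the SImode combination (ashift X 8) with mask 0xffffff00 (NB = 31,
   NE = 8) produces "rlwinm %0,%1,8,0,23": rotate left by 8 and keep
   big-endian mask bits 0..23.  */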
18248 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18249 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18250 ASHIFT, or LSHIFTRT) in mode MODE. */
18252 bool
18253 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18255 int nb, ne;
18257 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18258 return false;
18260 int n = GET_MODE_PRECISION (mode);
18262 int sh = INTVAL (XEXP (shift, 1));
18263 if (sh < 0 || sh >= n)
18264 return false;
18266 rtx_code code = GET_CODE (shift);
18268 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18269 if (sh == 0)
18270 code = ROTATE;
18272 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18273 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18274 code = ASHIFT;
18275 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18277 code = LSHIFTRT;
18278 sh = n - sh;
18281 /* DImode rotates need rldimi. */
18282 if (mode == DImode && code == ROTATE)
18283 return (ne == sh);
18285 /* SImode rotates need rlwimi. */
18286 if (mode == SImode && code == ROTATE)
18287 return (nb < 32 && ne < 32 && sh < 32);
18289 /* Wrap-around masks are only okay for rotates. */
18290 if (ne > nb)
18291 return false;
18293 /* Don't allow ASHIFT if the mask is wrong for that. */
18294 if (code == ASHIFT && ne < sh)
18295 return false;
18297 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18298 if the mask is wrong for that. */
18299 if (nb < 32 && ne < 32 && sh < 32
18300 && !(code == LSHIFTRT && nb >= 32 - sh))
18301 return true;
18303 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18304 if the mask is wrong for that. */
18305 if (code == LSHIFTRT)
18306 sh = 64 - sh;
18307 if (ne == sh)
18308 return !(code == LSHIFTRT && nb >= sh);
18310 return false;
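/* E.g., in DImode, (ashift X 32) with mask 0xffffffff00000000 (NB = 63,
   NE = 32) is accepted, since NE == SH: an rldimi can insert the low half
   of X into the top half of the destination.  */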
18313 /* Return the instruction template for an insert with mask in mode MODE, with
18314 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18316 const char *
18317 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18319 int nb, ne;
18321 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18322 gcc_unreachable ();
18324 /* Prefer rldimi because rlwimi is cracked. */
18325 if (TARGET_POWERPC64
18326 && (!dot || mode == DImode)
18327 && GET_CODE (operands[4]) != LSHIFTRT
18328 && ne == INTVAL (operands[2]))
18330 operands[3] = GEN_INT (63 - nb);
18331 if (dot)
18332 return "rldimi. %0,%1,%2,%3";
18333 return "rldimi %0,%1,%2,%3";
18336 if (nb < 32 && ne < 32)
18338 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18339 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18340 operands[3] = GEN_INT (31 - nb);
18341 operands[4] = GEN_INT (31 - ne);
18342 if (dot)
18343 return "rlwimi. %0,%1,%2,%3,%4";
18344 return "rlwimi %0,%1,%2,%3,%4";
18347 gcc_unreachable ();
18350 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18351 using two machine instructions. */
18353 bool
18354 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18356 /* There are two kinds of AND we can handle with two insns:
18357 1) those we can do with two rl* insns;
18358 2) ori[s];xori[s].
18360 We do not handle that last case yet. */
18362 /* If there is just one stretch of ones, we can do it. */
18363 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18364 return true;
18366 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18367 one insn, we can do the whole thing with two. */
18368 unsigned HOST_WIDE_INT val = INTVAL (c);
18369 unsigned HOST_WIDE_INT bit1 = val & -val;
18370 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18371 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18372 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18373 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
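/* A worked example: C = 0x00ff00ff has two runs of ones.  Then BIT1 = 0x1
   (the lowest set bit), BIT2 = 0x100 (the start of the low hole),
   VAL1 = 0x00ff0000, BIT3 = 0x10000 (the end of that hole), and
   VAL + BIT3 - BIT2 = 0x00ffffff, i.e. the value with the hole filled in.
   That is a single stretch of ones, so this AND can be done in two
   insns.  */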
18376 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18377 If EXPAND is true, split rotate-and-mask instructions we generate to
18378 their constituent parts as well (this is used during expand); if DOT
18379 is 1, make the last insn a record-form instruction clobbering the
18380 destination GPR and setting the CC reg (from operands[3]); if 2, set
18381 that GPR as well as the CC reg. */
18383 void
18384 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18386 gcc_assert (!(expand && dot));
18388 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18390 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18391 shift right. This generates better code than doing the masks without
18392 shifts, or shifting first right and then left. */
18393 int nb, ne;
18394 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18396 gcc_assert (mode == DImode);
18398 int shift = 63 - nb;
18399 if (expand)
18401 rtx tmp1 = gen_reg_rtx (DImode);
18402 rtx tmp2 = gen_reg_rtx (DImode);
18403 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18404 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18405 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18407 else
18409 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18410 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18411 emit_move_insn (operands[0], tmp);
18412 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18413 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18415 return;
18418 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18419 that does the rest. */
18420 unsigned HOST_WIDE_INT bit1 = val & -val;
18421 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18422 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18423 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18425 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18426 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18428 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18430 /* Two "no-rotate"-and-mask instructions, for SImode. */
18431 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18433 gcc_assert (mode == SImode);
18435 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18436 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18437 emit_move_insn (reg, tmp);
18438 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18439 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18440 return;
18443 gcc_assert (mode == DImode);
18445 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18446 insns; we have to do the first in SImode, because it wraps. */
18447 if (mask2 <= 0xffffffff
18448 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18450 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18451 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18452 GEN_INT (mask1));
18453 rtx reg_low = gen_lowpart (SImode, reg);
18454 emit_move_insn (reg_low, tmp);
18455 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18456 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18457 return;
18460 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18461 at the top end), rotate back and clear the other hole. */
18462 int right = exact_log2 (bit3);
18463 int left = 64 - right;
18465 /* Rotate the mask too. */
18466 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18468 if (expand)
18470 rtx tmp1 = gen_reg_rtx (DImode);
18471 rtx tmp2 = gen_reg_rtx (DImode);
18472 rtx tmp3 = gen_reg_rtx (DImode);
18473 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18474 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18475 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18476 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18478 else
18480 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18481 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18482 emit_move_insn (operands[0], tmp);
18483 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18484 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18485 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
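/* Continuing the 0x00ff00ff example in SImode: MASK1 = 0xffff00ff (a
   wrap-around mask) and MASK2 = 0x00ffffff, each valid for a single
   rlwinm, e.g. (with illustrative register numbers)

       rlwinm 3,4,0,24,15        # r3 = r4 & 0xffff00ff
       rlwinm 3,3,0,8,31         # r3 = r3 & 0x00ffffff

   and 0xffff00ff & 0x00ffffff == 0x00ff00ff, the original mask.  */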
18489 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18490 for lfq and stfq insns iff the registers are hard registers. */
18492 int
18493 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18495 /* We might have been passed a SUBREG. */
18496 if (!REG_P (reg1) || !REG_P (reg2))
18497 return 0;
18499 /* We might have been passed non-floating-point registers. */
18500 if (!FP_REGNO_P (REGNO (reg1))
18501 || !FP_REGNO_P (REGNO (reg2)))
18502 return 0;
18504 return (REGNO (reg1) == REGNO (reg2) - 1);
18507 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insns.
18508 addr1 and addr2 must be in consecutive memory locations
18509 (addr2 == addr1 + 8). */
18511 int
18512 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18514 rtx addr1, addr2;
18515 unsigned int reg1, reg2;
18516 int offset1, offset2;
18518 /* The mems cannot be volatile. */
18519 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18520 return 0;
18522 addr1 = XEXP (mem1, 0);
18523 addr2 = XEXP (mem2, 0);
18525 /* Extract an offset (if used) from the first addr. */
18526 if (GET_CODE (addr1) == PLUS)
18528 /* If not a REG, return zero. */
18529 if (!REG_P (XEXP (addr1, 0)))
18530 return 0;
18531 else
18533 reg1 = REGNO (XEXP (addr1, 0));
18534 /* The offset must be constant! */
18535 if (!CONST_INT_P (XEXP (addr1, 1)))
18536 return 0;
18537 offset1 = INTVAL (XEXP (addr1, 1));
18540 else if (!REG_P (addr1))
18541 return 0;
18542 else
18544 reg1 = REGNO (addr1);
18545 /* This was a simple (mem (reg)) expression. Offset is 0. */
18546 offset1 = 0;
18549 /* And now for the second addr. */
18550 if (GET_CODE (addr2) == PLUS)
18552 /* If not a REG, return zero. */
18553 if (!REG_P (XEXP (addr2, 0)))
18554 return 0;
18555 else
18557 reg2 = REGNO (XEXP (addr2, 0));
18558 /* The offset must be constant. */
18559 if (!CONST_INT_P (XEXP (addr2, 1)))
18560 return 0;
18561 offset2 = INTVAL (XEXP (addr2, 1));
18564 else if (!REG_P (addr2))
18565 return 0;
18566 else
18568 reg2 = REGNO (addr2);
18569 /* This was a simple (mem (reg)) expression. Offset is 0. */
18570 offset2 = 0;
18573 /* Both of these must have the same base register. */
18574 if (reg1 != reg2)
18575 return 0;
18577 /* The offset for the second addr must be 8 more than the first addr. */
18578 if (offset2 != offset1 + 8)
18579 return 0;
18581 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18582 instructions. */
18583 return 1;
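/* E.g., (mem (plus r3 8)) followed by (mem (plus r3 16)) qualifies: the
   same base register is used and the second offset is exactly 8 bytes past
   the first, so the pair is a candidate for a single lfq or stfq.  */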
18586 /* Implement TARGET_SECONDARY_RELOAD_NEEDED_MODE. For SDmode values we
18587 need to use DDmode, in all other cases we can use the same mode. */
18588 static machine_mode
18589 rs6000_secondary_memory_needed_mode (machine_mode mode)
18591 if (lra_in_progress && mode == SDmode)
18592 return DDmode;
18593 return mode;
18596 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18597 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18598 only work on the traditional altivec registers, note if an altivec register
18599 was chosen. */
18601 static enum rs6000_reg_type
18602 register_to_reg_type (rtx reg, bool *is_altivec)
18604 HOST_WIDE_INT regno;
18605 enum reg_class rclass;
18607 if (SUBREG_P (reg))
18608 reg = SUBREG_REG (reg);
18610 if (!REG_P (reg))
18611 return NO_REG_TYPE;
18613 regno = REGNO (reg);
18614 if (!HARD_REGISTER_NUM_P (regno))
18616 if (!lra_in_progress && !reload_completed)
18617 return PSEUDO_REG_TYPE;
18619 regno = true_regnum (reg);
18620 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18621 return PSEUDO_REG_TYPE;
18624 gcc_assert (regno >= 0);
18626 if (is_altivec && ALTIVEC_REGNO_P (regno))
18627 *is_altivec = true;
18629 rclass = rs6000_regno_regclass[regno];
18630 return reg_class_to_reg_type[(int)rclass];
18633 /* Helper function to return the cost of adding a TOC entry address. */
18635 static inline int
18636 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18638 int ret;
18640 if (TARGET_CMODEL != CMODEL_SMALL)
18641 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18643 else
18644 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18646 return ret;
18649 /* Helper function for rs6000_secondary_reload to determine whether the memory
18650 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18651 needs reloading. Return negative if the memory is not handled by the
18652 memory helper functions (so the caller should try a different reload
18653 method), 0 if no additional instructions are needed, and positive to
18654 give the extra cost of the memory access. */
18656 static int
18657 rs6000_secondary_reload_memory (rtx addr,
18658 enum reg_class rclass,
18659 machine_mode mode)
18661 int extra_cost = 0;
18662 rtx reg, and_arg, plus_arg0, plus_arg1;
18663 addr_mask_type addr_mask;
18664 const char *type = NULL;
18665 const char *fail_msg = NULL;
18667 if (GPR_REG_CLASS_P (rclass))
18668 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18670 else if (rclass == FLOAT_REGS)
18671 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18673 else if (rclass == ALTIVEC_REGS)
18674 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18676 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18677 else if (rclass == VSX_REGS)
18678 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18679 & ~RELOAD_REG_AND_M16);
18681 /* If the register allocator hasn't made up its mind yet on the register
18682 class to use, settle on default values. */
18683 else if (rclass == NO_REGS)
18685 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18686 & ~RELOAD_REG_AND_M16);
18688 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18689 addr_mask &= ~(RELOAD_REG_INDEXED
18690 | RELOAD_REG_PRE_INCDEC
18691 | RELOAD_REG_PRE_MODIFY);
18694 else
18695 addr_mask = 0;
18697 /* If the register isn't valid in this register class, just return now. */
18698 if ((addr_mask & RELOAD_REG_VALID) == 0)
18700 if (TARGET_DEBUG_ADDR)
18702 fprintf (stderr,
18703 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18704 "not valid in class\n",
18705 GET_MODE_NAME (mode), reg_class_names[rclass]);
18706 debug_rtx (addr);
18709 return -1;
18712 switch (GET_CODE (addr))
18714 /* Does the register class support auto update forms for this mode? We
18715 don't need a scratch register, since the powerpc only supports
18716 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18717 case PRE_INC:
18718 case PRE_DEC:
18719 reg = XEXP (addr, 0);
18720 if (!base_reg_operand (addr, GET_MODE (reg)))
18722 fail_msg = "no base register #1";
18723 extra_cost = -1;
18726 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18728 extra_cost = 1;
18729 type = "update";
18731 break;
18733 case PRE_MODIFY:
18734 reg = XEXP (addr, 0);
18735 plus_arg1 = XEXP (addr, 1);
18736 if (!base_reg_operand (reg, GET_MODE (reg))
18737 || GET_CODE (plus_arg1) != PLUS
18738 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18740 fail_msg = "bad PRE_MODIFY";
18741 extra_cost = -1;
18744 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18746 extra_cost = 1;
18747 type = "update";
18749 break;
18751 /* Do we need to simulate AND -16 to clear the bottom address bits used
18752 in VMX load/stores? Only allow the AND for vector sizes. */
18753 case AND:
18754 and_arg = XEXP (addr, 0);
18755 if (GET_MODE_SIZE (mode) != 16
18756 || !CONST_INT_P (XEXP (addr, 1))
18757 || INTVAL (XEXP (addr, 1)) != -16)
18759 fail_msg = "bad Altivec AND #1";
18760 extra_cost = -1;
18763 if (rclass != ALTIVEC_REGS)
18765 if (legitimate_indirect_address_p (and_arg, false))
18766 extra_cost = 1;
18768 else if (legitimate_indexed_address_p (and_arg, false))
18769 extra_cost = 2;
18771 else
18773 fail_msg = "bad Altivec AND #2";
18774 extra_cost = -1;
18777 type = "and";
18779 break;
18781 /* If this is an indirect address, make sure it is a base register. */
18782 case REG:
18783 case SUBREG:
18784 if (!legitimate_indirect_address_p (addr, false))
18786 extra_cost = 1;
18787 type = "move";
18789 break;
18791 /* If this is an indexed address, make sure the register class can handle
18792 indexed addresses for this mode. */
18793 case PLUS:
18794 plus_arg0 = XEXP (addr, 0);
18795 plus_arg1 = XEXP (addr, 1);
18797 /* (plus (plus (reg) (constant)) (constant)) is generated during
18798 push_reload processing, so handle it now. */
18799 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18801 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18803 extra_cost = 1;
18804 type = "offset";
18808 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18809 push_reload processing, so handle it now. */
18810 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18812 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18814 extra_cost = 1;
18815 type = "indexed #2";
18819 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18821 fail_msg = "no base register #2";
18822 extra_cost = -1;
18825 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18827 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18828 || !legitimate_indexed_address_p (addr, false))
18830 extra_cost = 1;
18831 type = "indexed";
18835 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18836 && CONST_INT_P (plus_arg1))
18838 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18840 extra_cost = 1;
18841 type = "vector d-form offset";
18845 /* Make sure the register class can handle offset addresses. */
18846 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18848 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18850 extra_cost = 1;
18851 type = "offset #2";
18855 else
18857 fail_msg = "bad PLUS";
18858 extra_cost = -1;
18861 break;
18863 case LO_SUM:
18864 /* Quad offsets are restricted and can't handle normal addresses. */
18865 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18867 extra_cost = -1;
18868 type = "vector d-form lo_sum";
18871 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18873 fail_msg = "bad LO_SUM";
18874 extra_cost = -1;
18877 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18879 extra_cost = 1;
18880 type = "lo_sum";
18882 break;
18884 /* Static addresses need to create a TOC entry. */
18885 case CONST:
18886 case SYMBOL_REF:
18887 case LABEL_REF:
18888 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18890 extra_cost = -1;
18891 type = "vector d-form lo_sum #2";
18894 else
18896 type = "address";
18897 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18899 break;
18901 /* TOC references look like offsetable memory. */
18902 case UNSPEC:
18903 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18905 fail_msg = "bad UNSPEC";
18906 extra_cost = -1;
18909 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18911 extra_cost = -1;
18912 type = "vector d-form lo_sum #3";
18915 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18917 extra_cost = 1;
18918 type = "toc reference";
18920 break;
18922 default:
18924 fail_msg = "bad address";
18925 extra_cost = -1;
18929 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
18931 if (extra_cost < 0)
18932 fprintf (stderr,
18933 "rs6000_secondary_reload_memory error: mode = %s, "
18934 "class = %s, addr_mask = '%s', %s\n",
18935 GET_MODE_NAME (mode),
18936 reg_class_names[rclass],
18937 rs6000_debug_addr_mask (addr_mask, false),
18938 (fail_msg != NULL) ? fail_msg : "<bad address>");
18940 else
18941 fprintf (stderr,
18942 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18943 "addr_mask = '%s', extra cost = %d, %s\n",
18944 GET_MODE_NAME (mode),
18945 reg_class_names[rclass],
18946 rs6000_debug_addr_mask (addr_mask, false),
18947 extra_cost,
18948 (type) ? type : "<none>");
18950 debug_rtx (addr);
18953 return extra_cost;
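/* As an example of the cost convention: a PRE_INC address for a register
   class without update forms is charged EXTRA_COST = 1 (one add to perform
   the update by hand), while a malformed address, such as a PRE_MODIFY
   whose increment does not match its base register, returns -1 so that the
   caller falls back to a different reload strategy.  */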
18956 /* Helper function for rs6000_secondary_reload to return true if a move to a
18957 different register class is really a simple move. */
18959 static bool
18960 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
18961 enum rs6000_reg_type from_type,
18962 machine_mode mode)
18964 int size = GET_MODE_SIZE (mode);
18966 /* Handle the various direct moves available. In this function, we only
18967 look at cases where we don't need any extra registers, and one or more
18968 simple move insns are issued. Small integers were originally not allowed
18969 in FPR/VSX registers. Single precision binary floating point is not a simple
18970 move because we need to convert to the single precision memory layout.
18971 The 4-byte SDmode can be moved. TDmode values are disallowed since they
18972 need special direct move handling, which we do not support yet. */
18973 if (TARGET_DIRECT_MOVE
18974 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18975 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
18977 if (TARGET_POWERPC64)
18979 /* ISA 2.07: MTVSRD or MFVSRD. */
18980 if (size == 8)
18981 return true;
18983 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
18984 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
18985 return true;
18988 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18989 if (TARGET_P8_VECTOR)
18991 if (mode == SImode)
18992 return true;
18994 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
18995 return true;
18998 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18999 if (mode == SDmode)
19000 return true;
19003 /* Move to/from SPR. */
19004 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19005 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19006 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19007 return true;
19009 return false;
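/* E.g., with TARGET_DIRECT_MOVE on a 64-bit target, a DImode move between
   a GPR and a VSX register is "simple": a single mtvsrd or mfvsrd does the
   job, so no scratch register or helper pattern is needed.  */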
19012 /* Direct move helper function for rs6000_secondary_reload; handle all of the
19013 special direct moves that involve allocating an extra register. Return true
19014 if there is such a helper, storing its insn code and extra cost in SRI;
19015 return false if not. */
19017 static bool
19018 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19019 enum rs6000_reg_type from_type,
19020 machine_mode mode,
19021 secondary_reload_info *sri,
19022 bool altivec_p)
19024 bool ret = false;
19025 enum insn_code icode = CODE_FOR_nothing;
19026 int cost = 0;
19027 int size = GET_MODE_SIZE (mode);
19029 if (TARGET_POWERPC64 && size == 16)
19031 /* Handle moving 128-bit values from GPRs to VSX registers on
19032 ISA 2.07 (power8, power9) when running in 64-bit mode using
19033 XXPERMDI to glue the two 64-bit values back together. */
19034 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19036 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19037 icode = reg_addr[mode].reload_vsx_gpr;
19040 /* Handle moving 128-bit values from VSX registers to GPRs on
19041 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19042 bottom 64-bit value. */
19043 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19045 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19046 icode = reg_addr[mode].reload_gpr_vsx;
19050 else if (TARGET_POWERPC64 && mode == SFmode)
19052 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19054 cost = 3; /* xscvdpspn, mfvsrd, and. */
19055 icode = reg_addr[mode].reload_gpr_vsx;
19058 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19060 cost = 2; /* mtvsrz, xscvspdpn. */
19061 icode = reg_addr[mode].reload_vsx_gpr;
19065 else if (!TARGET_POWERPC64 && size == 8)
19067 /* Handle moving 64-bit values from GPRs to floating point registers on
19068 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19069 32-bit values back together. Altivec register classes must be handled
19070 specially since a different instruction is used, and the secondary
19071 reload support requires a single instruction class in the scratch
19072 register constraint. However, right now TFmode is not allowed in
19073 Altivec registers, so the pattern will never match. */
19074 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19076 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19077 icode = reg_addr[mode].reload_fpr_gpr;
19081 if (icode != CODE_FOR_nothing)
19083 ret = true;
19084 if (sri)
19086 sri->icode = icode;
19087 sri->extra_cost = cost;
19091 return ret;
19094 /* Return whether a move between two register classes can be done either
19095 directly (simple move) or via a pattern that uses a single extra temporary
19096 (using ISA 2.07's direct move in this case). */
19098 static bool
19099 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19100 enum rs6000_reg_type from_type,
19101 machine_mode mode,
19102 secondary_reload_info *sri,
19103 bool altivec_p)
19105 /* Fall back to load/store reloads if either type is not a register. */
19106 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19107 return false;
19109 /* If we haven't allocated registers yet, assume the move can be done for the
19110 standard register types. */
19111 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19112 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19113 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19114 return true;
19116 /* A move within the same set of registers is a simple move for
19117 non-specialized registers. */
19118 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19119 return true;
19121 /* Check whether a simple move can be done directly. */
19122 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19124 if (sri)
19126 sri->icode = CODE_FOR_nothing;
19127 sri->extra_cost = 0;
19129 return true;
19132 /* Now check if we can do it in a few steps. */
19133 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19134 altivec_p);
19137 /* Inform reload about cases where moving X with a mode MODE to a register in
19138 RCLASS requires an extra scratch or immediate register. Return the class
19139 needed for the immediate register.
19141 For VSX and Altivec, we may need a register to convert sp+offset into
19142 reg+sp.
19144 For misaligned 64-bit gpr loads and stores we need a register to
19145 convert an offset address to indirect. */
19147 static reg_class_t
19148 rs6000_secondary_reload (bool in_p,
19149 rtx x,
19150 reg_class_t rclass_i,
19151 machine_mode mode,
19152 secondary_reload_info *sri)
19154 enum reg_class rclass = (enum reg_class) rclass_i;
19155 reg_class_t ret = ALL_REGS;
19156 enum insn_code icode;
19157 bool default_p = false;
19158 bool done_p = false;
19160 /* Allow subreg of memory before/during reload. */
19161 bool memory_p = (MEM_P (x)
19162 || (!reload_completed && SUBREG_P (x)
19163 && MEM_P (SUBREG_REG (x))));
19165 sri->icode = CODE_FOR_nothing;
19166 sri->t_icode = CODE_FOR_nothing;
19167 sri->extra_cost = 0;
19168 icode = ((in_p)
19169 ? reg_addr[mode].reload_load
19170 : reg_addr[mode].reload_store);
19172 if (REG_P (x) || register_operand (x, mode))
19174 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19175 bool altivec_p = (rclass == ALTIVEC_REGS);
19176 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19178 if (!in_p)
19179 std::swap (to_type, from_type);
19181 /* Can we do a direct move of some sort? */
19182 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19183 altivec_p))
19185 icode = (enum insn_code)sri->icode;
19186 default_p = false;
19187 done_p = true;
19188 ret = NO_REGS;
19192 /* Make sure 0.0 is not reloaded or forced into memory. */
19193 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19195 ret = NO_REGS;
19196 default_p = false;
19197 done_p = true;
19200 /* If this is a scalar floating point value and we want to load it into the
19201 traditional Altivec registers, do it via a move via a traditional floating
19202 point register, unless we have D-form addressing. Also make sure that
19203 non-zero constants use a FPR. */
19204 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19205 && !mode_supports_vmx_dform (mode)
19206 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19207 && (memory_p || CONST_DOUBLE_P (x)))
19209 ret = FLOAT_REGS;
19210 default_p = false;
19211 done_p = true;
19214 /* Handle reload of load/stores if we have reload helper functions. */
19215 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19217 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19218 mode);
19220 if (extra_cost >= 0)
19222 done_p = true;
19223 ret = NO_REGS;
19224 if (extra_cost > 0)
19226 sri->extra_cost = extra_cost;
19227 sri->icode = icode;
19232 /* Handle unaligned loads and stores of integer registers. */
19233 if (!done_p && TARGET_POWERPC64
19234 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19235 && memory_p
19236 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19238 rtx addr = XEXP (x, 0);
19239 rtx off = address_offset (addr);
19241 if (off != NULL_RTX)
19243 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19244 unsigned HOST_WIDE_INT offset = INTVAL (off);
19246 /* We need a secondary reload when our legitimate_address_p
19247 says the address is good (as otherwise the entire address
19248 will be reloaded), and the offset is not a multiple of
19249 four or we have an address wrap. Address wrap will only
19250 occur for LO_SUMs since legitimate_offset_address_p
19251 rejects addresses for 16-byte mems that will wrap. */
19252 if (GET_CODE (addr) == LO_SUM
19253 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19254 && ((offset & 3) != 0
19255 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19256 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19257 && (offset & 3) != 0))
19259 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19260 if (in_p)
19261 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19262 : CODE_FOR_reload_di_load);
19263 else
19264 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19265 : CODE_FOR_reload_di_store);
19266 sri->extra_cost = 2;
19267 ret = NO_REGS;
19268 done_p = true;
19270 else
19271 default_p = true;
19273 else
19274 default_p = true;
19277 if (!done_p && !TARGET_POWERPC64
19278 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19279 && memory_p
19280 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19282 rtx addr = XEXP (x, 0);
19283 rtx off = address_offset (addr);
19285 if (off != NULL_RTX)
19287 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19288 unsigned HOST_WIDE_INT offset = INTVAL (off);
19290 /* We need a secondary reload when our legitimate_address_p
19291 says the address is good (as otherwise the entire address
19292 will be reloaded), and we have a wrap.
19294 legitimate_lo_sum_address_p allows LO_SUM addresses to
19295 have any offset so test for wrap in the low 16 bits.
19297 legitimate_offset_address_p checks for the range
19298 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19299 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19300 [0x7ff4,0x7fff] respectively, so test for the
19301 intersection of these ranges, [0x7ffc,0x7fff] and
19302 [0x7ff4,0x7ff7] respectively.
19304 Note that the address we see here may have been
19305 manipulated by legitimize_reload_address. */
19306 if (GET_CODE (addr) == LO_SUM
19307 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19308 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19310 if (in_p)
19311 sri->icode = CODE_FOR_reload_si_load;
19312 else
19313 sri->icode = CODE_FOR_reload_si_store;
19314 sri->extra_cost = 2;
19315 ret = NO_REGS;
19316 done_p = true;
19318 else
19319 default_p = true;
19321 else
19322 default_p = true;
19325 if (!done_p)
19326 default_p = true;
19328 if (default_p)
19329 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19331 gcc_assert (ret != ALL_REGS);
19333 if (TARGET_DEBUG_ADDR)
19335 fprintf (stderr,
19336 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19337 "mode = %s",
19338 reg_class_names[ret],
19339 in_p ? "true" : "false",
19340 reg_class_names[rclass],
19341 GET_MODE_NAME (mode));
19343 if (reload_completed)
19344 fputs (", after reload", stderr);
19346 if (!done_p)
19347 fputs (", done_p not set", stderr);
19349 if (default_p)
19350 fputs (", default secondary reload", stderr);
19352 if (sri->icode != CODE_FOR_nothing)
19353 fprintf (stderr, ", reload func = %s, extra cost = %d",
19354 insn_data[sri->icode].name, sri->extra_cost);
19356 else if (sri->extra_cost > 0)
19357 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19359 fputs ("\n", stderr);
19360 debug_rtx (x);
19363 return ret;
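/* A typical case for the misaligned-GPR path above: a 64-bit load from
   (plus r1 13) is acceptable to legitimate_address_p, but the DS-form ld
   instruction requires a multiple-of-4 offset, so CODE_FOR_reload_di_load
   is returned with an extra cost of 2 to rebuild the address in a scratch
   register.  */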
19366 /* Better tracing for rs6000_secondary_reload_inner. */
19368 static void
19369 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19370 bool store_p)
19372 rtx set, clobber;
19374 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19376 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19377 store_p ? "store" : "load");
19379 if (store_p)
19380 set = gen_rtx_SET (mem, reg);
19381 else
19382 set = gen_rtx_SET (reg, mem);
19384 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19385 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19388 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19389 ATTRIBUTE_NORETURN;
19391 static void
19392 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19393 bool store_p)
19395 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19396 gcc_unreachable ();
19399 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19400 reload helper functions. These were identified in
19401 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19402 reload, it calls the insns:
19403 reload_<RELOAD:mode>_<P:mptrsize>_store
19404 reload_<RELOAD:mode>_<P:mptrsize>_load
19406 which in turn calls this function, to do whatever is necessary to create
19407 valid addresses. */
19409 void
19410 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19412 int regno = true_regnum (reg);
19413 machine_mode mode = GET_MODE (reg);
19414 addr_mask_type addr_mask;
19415 rtx addr;
19416 rtx new_addr;
19417 rtx op_reg, op0, op1;
19418 rtx and_op;
19419 rtx cc_clobber;
19420 rtvec rv;
19422 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19423 || !base_reg_operand (scratch, GET_MODE (scratch)))
19424 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19426 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19427 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19429 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19430 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19432 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19433 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19435 else
19436 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19438 /* Make sure the mode is valid in this register class. */
19439 if ((addr_mask & RELOAD_REG_VALID) == 0)
19440 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19442 if (TARGET_DEBUG_ADDR)
19443 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19445 new_addr = addr = XEXP (mem, 0);
19446 switch (GET_CODE (addr))
19448 /* Does the register class support auto update forms for this mode? If
19449 not, do the update now. We don't need a scratch register, since the
19450 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19451 case PRE_INC:
19452 case PRE_DEC:
19453 op_reg = XEXP (addr, 0);
19454 if (!base_reg_operand (op_reg, Pmode))
19455 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19457 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19459 int delta = GET_MODE_SIZE (mode);
19460 if (GET_CODE (addr) == PRE_DEC)
19461 delta = -delta;
19462 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19463 new_addr = op_reg;
19465 break;
19467 case PRE_MODIFY:
19468 op0 = XEXP (addr, 0);
19469 op1 = XEXP (addr, 1);
19470 if (!base_reg_operand (op0, Pmode)
19471 || GET_CODE (op1) != PLUS
19472 || !rtx_equal_p (op0, XEXP (op1, 0)))
19473 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19475 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19477 emit_insn (gen_rtx_SET (op0, op1));
19478 new_addr = reg;
19480 break;
19482 /* Do we need to simulate AND -16 to clear the bottom address bits used
19483 in VMX load/stores? */
19484 case AND:
19485 op0 = XEXP (addr, 0);
19486 op1 = XEXP (addr, 1);
19487 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19489 if (REG_P (op0) || SUBREG_P (op0))
19490 op_reg = op0;
19492 else if (GET_CODE (op0) == PLUS)
19494 emit_insn (gen_rtx_SET (scratch, op0));
19495 op_reg = scratch;
19498 else
19499 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19501 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19502 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19503 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19504 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19505 new_addr = scratch;
19507 break;
19509 /* If this is an indirect address, make sure it is a base register. */
19510 case REG:
19511 case SUBREG:
19512 if (!base_reg_operand (addr, GET_MODE (addr)))
19514 emit_insn (gen_rtx_SET (scratch, addr));
19515 new_addr = scratch;
19517 break;
19519 /* If this is an indexed address, make sure the register class can handle
19520 indexed addresses for this mode. */
19521 case PLUS:
19522 op0 = XEXP (addr, 0);
19523 op1 = XEXP (addr, 1);
19524 if (!base_reg_operand (op0, Pmode))
19525 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19527 else if (int_reg_operand (op1, Pmode))
19529 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19531 emit_insn (gen_rtx_SET (scratch, addr));
19532 new_addr = scratch;
19536 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19538 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19539 || !quad_address_p (addr, mode, false))
19541 emit_insn (gen_rtx_SET (scratch, addr));
19542 new_addr = scratch;
19546 /* Make sure the register class can handle offset addresses. */
19547 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19549 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19551 emit_insn (gen_rtx_SET (scratch, addr));
19552 new_addr = scratch;
19556 else
19557 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19559 break;
19561 case LO_SUM:
19562 op0 = XEXP (addr, 0);
19563 op1 = XEXP (addr, 1);
19564 if (!base_reg_operand (op0, Pmode))
19565 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19567 else if (int_reg_operand (op1, Pmode))
19569 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19571 emit_insn (gen_rtx_SET (scratch, addr));
19572 new_addr = scratch;
19576 /* Quad offsets are restricted and can't handle normal addresses. */
19577 else if (mode_supports_dq_form (mode))
19579 emit_insn (gen_rtx_SET (scratch, addr));
19580 new_addr = scratch;
19583 /* Make sure the register class can handle offset addresses. */
19584 else if (legitimate_lo_sum_address_p (mode, addr, false))
19586 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19588 emit_insn (gen_rtx_SET (scratch, addr));
19589 new_addr = scratch;
19593 else
19594 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19596 break;
19598 case SYMBOL_REF:
19599 case CONST:
19600 case LABEL_REF:
19601 rs6000_emit_move (scratch, addr, Pmode);
19602 new_addr = scratch;
19603 break;
19605 default:
19606 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19609 /* Adjust the address if it changed. */
19610 if (addr != new_addr)
19612 mem = replace_equiv_address_nv (mem, new_addr);
19613 if (TARGET_DEBUG_ADDR)
19614 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19617 /* Now create the move. */
19618 if (store_p)
19619 emit_insn (gen_rtx_SET (mem, reg));
19620 else
19621 emit_insn (gen_rtx_SET (reg, mem));
19623 return;
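/* For example, a V4SImode access to (and (plus (reg 9) (reg 10)) (const_int
   -16)) in a register class without RELOAD_REG_AND_M16 support is handled by
   copying the PLUS into SCRATCH, emitting SCRATCH = SCRATCH & -16 as a
   parallel with a CC scratch clobber (matching the AND patterns), and then
   rewriting the MEM to be indirect through SCRATCH before the plain load or
   store is emitted.  */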
19626 /* Convert reloads involving 64-bit gprs and misaligned offset
19627 addressing, or multiple 32-bit gprs and offsets that are too large,
19628 to use indirect addressing. */
19630 void
19631 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19633 int regno = true_regnum (reg);
19634 enum reg_class rclass;
19635 rtx addr;
19636 rtx scratch_or_premodify = scratch;
19638 if (TARGET_DEBUG_ADDR)
19640 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19641 store_p ? "store" : "load");
19642 fprintf (stderr, "reg:\n");
19643 debug_rtx (reg);
19644 fprintf (stderr, "mem:\n");
19645 debug_rtx (mem);
19646 fprintf (stderr, "scratch:\n");
19647 debug_rtx (scratch);
19650 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19651 gcc_assert (MEM_P (mem));
19652 rclass = REGNO_REG_CLASS (regno);
19653 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19654 addr = XEXP (mem, 0);
19656 if (GET_CODE (addr) == PRE_MODIFY)
19658 gcc_assert (REG_P (XEXP (addr, 0))
19659 && GET_CODE (XEXP (addr, 1)) == PLUS
19660 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19661 scratch_or_premodify = XEXP (addr, 0);
19662 addr = XEXP (addr, 1);
19664 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19666 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19668 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19670 /* Now create the move. */
19671 if (store_p)
19672 emit_insn (gen_rtx_SET (mem, reg));
19673 else
19674 emit_insn (gen_rtx_SET (reg, mem));
19676 return;
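/* For example, a DImode access to (mem (plus (reg 9) (const_int 5))) cannot
   use the DS-form LD instruction, whose offset must be a multiple of 4; the
   code above moves r9+5 into the scratch register and performs the access
   indirectly through it.  For a PRE_MODIFY address, the base register being
   updated itself serves as the scratch.  */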
19679 /* Given an rtx X being reloaded into a reg required to be
19680 in class CLASS, return the class of reg to actually use.
19681 In general this is just CLASS; but on some machines
19682 in some cases it is preferable to use a more restrictive class.
19684 On the RS/6000, we have to return NO_REGS when we want to reload a
19685 floating-point CONST_DOUBLE to force it to be copied to memory.
19687 We also don't want to reload integer values into floating-point
19688 registers if we can at all help it. In fact, this can
19689 cause reload to die, if it tries to generate a reload of CTR
19690 into a FP register and discovers it doesn't have the memory location
19691 required.
19693 ??? Would it be a good idea to have reload do the converse, that is
19694 try to reload floating modes into FP registers if possible?
19697 static enum reg_class
19698 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19700 machine_mode mode = GET_MODE (x);
19701 bool is_constant = CONSTANT_P (x);
19703 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19704 reload class for it. */
19705 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19706 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19707 return NO_REGS;
19709 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19710 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19711 return NO_REGS;
19713 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19714 the reloading of address expressions using PLUS into floating point
19715 registers. */
19716 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19718 if (is_constant)
19720 /* Zero is always allowed in all VSX registers. */
19721 if (x == CONST0_RTX (mode))
19722 return rclass;
19724 /* If this is a vector constant that can be formed with a few Altivec
19725 instructions, we want altivec registers. */
19726 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19727 return ALTIVEC_REGS;
19729 /* If this is an integer constant that can easily be loaded into
19730 vector registers, allow it. */
19731 if (CONST_INT_P (x))
19733 HOST_WIDE_INT value = INTVAL (x);
19735 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19736 2.06 can generate it in the Altivec registers with
19737 VSPLTI<x>. */
19738 if (value == -1)
19740 if (TARGET_P8_VECTOR)
19741 return rclass;
19742 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19743 return ALTIVEC_REGS;
19744 else
19745 return NO_REGS;
19748 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19749 a sign extend in the Altivec registers. */
19750 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19751 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19752 return ALTIVEC_REGS;
19755 /* Force constant to memory. */
19756 return NO_REGS;
19759 /* D-form addressing can easily reload the value. */
19760 if (mode_supports_vmx_dform (mode)
19761 || mode_supports_dq_form (mode))
19762 return rclass;
19764 /* If this is a scalar floating point value and we don't have D-form
19765 addressing, prefer the traditional floating point registers so that we
19766 can use D-form (register+offset) addressing. */
19767 if (rclass == VSX_REGS
19768 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19769 return FLOAT_REGS;
19771 /* Prefer the Altivec registers if Altivec is handling the vector
19772 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19773 loads. */
19774 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19775 || mode == V1TImode)
19776 return ALTIVEC_REGS;
19778 return rclass;
19781 if (is_constant || GET_CODE (x) == PLUS)
19783 if (reg_class_subset_p (GENERAL_REGS, rclass))
19784 return GENERAL_REGS;
19785 if (reg_class_subset_p (BASE_REGS, rclass))
19786 return BASE_REGS;
19787 return NO_REGS;
19790 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19791 return GENERAL_REGS;
19793 return rclass;
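/* For example, reloading (const_int -1) in DImode into VSX_REGS keeps
   VSX_REGS on ISA 2.07 (XXLORC can generate -1 in any VSX register) but
   narrows to ALTIVEC_REGS on ISA 2.06 (VSPLTI<x>).  With ISA 3.0, a constant
   in -128..127 such as 100 also narrows to ALTIVEC_REGS so XXSPLTIB plus a
   sign extend can be used; other nonzero integer constants return NO_REGS
   and are forced to memory.  */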
19796 /* Debug version of rs6000_preferred_reload_class. */
19797 static enum reg_class
19798 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19800 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19802 fprintf (stderr,
19803 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19804 "mode = %s, x:\n",
19805 reg_class_names[ret], reg_class_names[rclass],
19806 GET_MODE_NAME (GET_MODE (x)));
19807 debug_rtx (x);
19809 return ret;
19812 /* If we are copying between FP or AltiVec registers and anything else, we need
19813 a memory location. The exception is when we are targeting ppc64 and the
19814 move to/from fpr to gpr instructions are available. Also, under VSX, you
19815 can copy vector registers from the FP register set to the Altivec register
19816 set and vice versa. */
19818 static bool
19819 rs6000_secondary_memory_needed (machine_mode mode,
19820 reg_class_t from_class,
19821 reg_class_t to_class)
19823 enum rs6000_reg_type from_type, to_type;
19824 bool altivec_p = ((from_class == ALTIVEC_REGS)
19825 || (to_class == ALTIVEC_REGS));
19827 /* If a simple/direct move is available, we don't need secondary memory. */
19828 from_type = reg_class_to_reg_type[(int)from_class];
19829 to_type = reg_class_to_reg_type[(int)to_class];
19831 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19832 (secondary_reload_info *)0, altivec_p))
19833 return false;
19835 /* If we have a floating point or vector register class, we need to use
19836 memory to transfer the data. */
19837 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19838 return true;
19840 return false;
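/* For example, an SFmode copy between GENERAL_REGS and FLOAT_REGS requires a
   stack slot unless the ISA 2.07 direct-move instructions let
   rs6000_secondary_reload_move synthesize a register-to-register sequence.
   A GPR-to-GPR copy returns false, since neither side is a floating-point or
   vector register type.  */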
19843 /* Debug version of rs6000_secondary_memory_needed. */
19844 static bool
19845 rs6000_debug_secondary_memory_needed (machine_mode mode,
19846 reg_class_t from_class,
19847 reg_class_t to_class)
19849 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19851 fprintf (stderr,
19852 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19853 "to_class = %s, mode = %s\n",
19854 ret ? "true" : "false",
19855 reg_class_names[from_class],
19856 reg_class_names[to_class],
19857 GET_MODE_NAME (mode));
19859 return ret;
19862 /* Return the register class of a scratch register needed to copy IN into
19863 or out of a register in RCLASS in MODE. If it can be done directly,
19864 NO_REGS is returned. */
19866 static enum reg_class
19867 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19868 rtx in)
19870 int regno;
19872 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19873 #if TARGET_MACHO
19874 && MACHOPIC_INDIRECT
19875 #endif
19878 /* We cannot copy a symbolic operand directly into anything
19879 other than BASE_REGS for TARGET_ELF. So indicate that a
19880 register from BASE_REGS is needed as an intermediate
19881 register.
19883 On Darwin, pic addresses require a load from memory, which
19884 needs a base register. */
19885 if (rclass != BASE_REGS
19886 && (SYMBOL_REF_P (in)
19887 || GET_CODE (in) == HIGH
19888 || GET_CODE (in) == LABEL_REF
19889 || GET_CODE (in) == CONST))
19890 return BASE_REGS;
19893 if (REG_P (in))
19895 regno = REGNO (in);
19896 if (!HARD_REGISTER_NUM_P (regno))
19898 regno = true_regnum (in);
19899 if (!HARD_REGISTER_NUM_P (regno))
19900 regno = -1;
19903 else if (SUBREG_P (in))
19905 regno = true_regnum (in);
19906 if (!HARD_REGISTER_NUM_P (regno))
19907 regno = -1;
19909 else
19910 regno = -1;
19912 /* If we have VSX register moves, prefer moving scalar values between
19913 Altivec registers and GPR by going via an FPR (and then via memory)
19914 instead of reloading the secondary memory address for Altivec moves. */
19915 if (TARGET_VSX
19916 && GET_MODE_SIZE (mode) < 16
19917 && !mode_supports_vmx_dform (mode)
19918 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
19919 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
19920 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19921 && (regno >= 0 && INT_REGNO_P (regno)))))
19922 return FLOAT_REGS;
19924 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
19925 into anything. */
19926 if (rclass == GENERAL_REGS || rclass == BASE_REGS
19927 || (regno >= 0 && INT_REGNO_P (regno)))
19928 return NO_REGS;
19930 /* Constants, memory, and VSX registers can go into VSX registers (both the
19931 traditional floating point and the altivec registers). */
19932 if (rclass == VSX_REGS
19933 && (regno == -1 || VSX_REGNO_P (regno)))
19934 return NO_REGS;
19936 /* Constants, memory, and FP registers can go into FP registers. */
19937 if ((regno == -1 || FP_REGNO_P (regno))
19938 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
19939 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
19941 /* Memory, and AltiVec registers can go into AltiVec registers. */
19942 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
19943 && rclass == ALTIVEC_REGS)
19944 return NO_REGS;
19946 /* We can copy among the CR registers. */
19947 if ((rclass == CR_REGS || rclass == CR0_REGS)
19948 && regno >= 0 && CR_REGNO_P (regno))
19949 return NO_REGS;
19951 /* Otherwise, we need GENERAL_REGS. */
19952 return GENERAL_REGS;
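/* For example, on ELF targets a SYMBOL_REF being reloaded into FLOAT_REGS
   answers BASE_REGS, because the address must first be formed in a base
   register.  Reloading a DImode GPR value into ALTIVEC_REGS under VSX (when
   the mode lacks Altivec D-form addressing) answers FLOAT_REGS, so the copy
   goes via an FPR (and then via memory if needed) instead of reloading a
   secondary memory address.  */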
19955 /* Debug version of rs6000_secondary_reload_class. */
19956 static enum reg_class
19957 rs6000_debug_secondary_reload_class (enum reg_class rclass,
19958 machine_mode mode, rtx in)
19960 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
19961 fprintf (stderr,
19962 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
19963 "mode = %s, input rtx:\n",
19964 reg_class_names[ret], reg_class_names[rclass],
19965 GET_MODE_NAME (mode));
19966 debug_rtx (in);
19968 return ret;
19971 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
19973 static bool
19974 rs6000_can_change_mode_class (machine_mode from,
19975 machine_mode to,
19976 reg_class_t rclass)
19978 unsigned from_size = GET_MODE_SIZE (from);
19979 unsigned to_size = GET_MODE_SIZE (to);
19981 if (from_size != to_size)
19983 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
19985 if (reg_classes_intersect_p (xclass, rclass))
19987 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
19988 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
19989 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
19990 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
19992 /* Don't allow 64-bit types to overlap with 128-bit types that take a
19993 single register under VSX because the scalar part of the register
19994 is in the upper 64-bits, and not the lower 64-bits. Types like
19995 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
19996 IEEE floating point can't overlap, and neither can small
19997 values. */
19999 if (to_float128_vector_p && from_float128_vector_p)
20000 return true;
20002 else if (to_float128_vector_p || from_float128_vector_p)
20003 return false;
20005 /* TDmode in floating-mode registers must always go into a register
20006 pair with the most significant word in the even-numbered register
20007 to match ISA requirements. In little-endian mode, this does not
20008 match subreg numbering, so we cannot allow subregs. */
20009 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20010 return false;
20012 if (from_size < 8 || to_size < 8)
20013 return false;
20015 if (from_size == 8 && (8 * to_nregs) != to_size)
20016 return false;
20018 if (to_size == 8 && (8 * from_nregs) != from_size)
20019 return false;
20021 return true;
20023 else
20024 return true;
20027 /* Since the VSX register set includes traditional floating point registers
20028 and altivec registers, just check for the size being different instead of
20029 trying to check whether the modes are vector modes. Otherwise it won't
20030 allow say DF and DI to change classes. For types like TFmode and TDmode
20031 that take 2 64-bit registers, rather than a single 128-bit register, don't
20032 allow subregs of those types to other 128 bit types. */
20033 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20035 unsigned num_regs = (from_size + 15) / 16;
20036 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20037 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20038 return false;
20040 return (from_size == 8 || from_size == 16);
20043 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20044 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20045 return false;
20047 return true;
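/* For example, DImode <-> DFmode subregs are allowed in VSX registers (both
   are 8 bytes and take one register), while DImode <-> KFmode is rejected:
   the IEEE 128-bit value occupies a single vector register whose scalar part
   is the upper 64 bits.  DImode <-> TDmode is rejected on little-endian,
   since TDmode pairs must start on an even register, which subreg numbering
   cannot express there.  */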
20050 /* Debug version of rs6000_can_change_mode_class. */
20051 static bool
20052 rs6000_debug_can_change_mode_class (machine_mode from,
20053 machine_mode to,
20054 reg_class_t rclass)
20056 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20058 fprintf (stderr,
20059 "rs6000_can_change_mode_class, return %s, from = %s, "
20060 "to = %s, rclass = %s\n",
20061 ret ? "true" : "false",
20062 GET_MODE_NAME (from), GET_MODE_NAME (to),
20063 reg_class_names[rclass]);
20065 return ret;
20068 /* Return a string to do a move operation of 128 bits of data. */
20070 const char *
20071 rs6000_output_move_128bit (rtx operands[])
20073 rtx dest = operands[0];
20074 rtx src = operands[1];
20075 machine_mode mode = GET_MODE (dest);
20076 int dest_regno;
20077 int src_regno;
20078 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20079 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20081 if (REG_P (dest))
20083 dest_regno = REGNO (dest);
20084 dest_gpr_p = INT_REGNO_P (dest_regno);
20085 dest_fp_p = FP_REGNO_P (dest_regno);
20086 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20087 dest_vsx_p = dest_fp_p | dest_vmx_p;
20089 else
20091 dest_regno = -1;
20092 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20095 if (REG_P (src))
20097 src_regno = REGNO (src);
20098 src_gpr_p = INT_REGNO_P (src_regno);
20099 src_fp_p = FP_REGNO_P (src_regno);
20100 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20101 src_vsx_p = src_fp_p | src_vmx_p;
20103 else
20105 src_regno = -1;
20106 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20109 /* Register moves. */
20110 if (dest_regno >= 0 && src_regno >= 0)
20112 if (dest_gpr_p)
20114 if (src_gpr_p)
20115 return "#";
20117 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20118 return (WORDS_BIG_ENDIAN
20119 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20120 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20122 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20123 return "#";
20126 else if (TARGET_VSX && dest_vsx_p)
20128 if (src_vsx_p)
20129 return "xxlor %x0,%x1,%x1";
20131 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20132 return (WORDS_BIG_ENDIAN
20133 ? "mtvsrdd %x0,%1,%L1"
20134 : "mtvsrdd %x0,%L1,%1");
20136 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20137 return "#";
20140 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20141 return "vor %0,%1,%1";
20143 else if (dest_fp_p && src_fp_p)
20144 return "#";
20147 /* Loads. */
20148 else if (dest_regno >= 0 && MEM_P (src))
20150 if (dest_gpr_p)
20152 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20153 return "lq %0,%1";
20154 else
20155 return "#";
20158 else if (TARGET_ALTIVEC && dest_vmx_p
20159 && altivec_indexed_or_indirect_operand (src, mode))
20160 return "lvx %0,%y1";
20162 else if (TARGET_VSX && dest_vsx_p)
20164 if (mode_supports_dq_form (mode)
20165 && quad_address_p (XEXP (src, 0), mode, true))
20166 return "lxv %x0,%1";
20168 else if (TARGET_P9_VECTOR)
20169 return "lxvx %x0,%y1";
20171 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20172 return "lxvw4x %x0,%y1";
20174 else
20175 return "lxvd2x %x0,%y1";
20178 else if (TARGET_ALTIVEC && dest_vmx_p)
20179 return "lvx %0,%y1";
20181 else if (dest_fp_p)
20182 return "#";
20185 /* Stores. */
20186 else if (src_regno >= 0 && MEM_P (dest))
20188 if (src_gpr_p)
20190 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20191 return "stq %1,%0";
20192 else
20193 return "#";
20196 else if (TARGET_ALTIVEC && src_vmx_p
20197 && altivec_indexed_or_indirect_operand (dest, mode))
20198 return "stvx %1,%y0";
20200 else if (TARGET_VSX && src_vsx_p)
20202 if (mode_supports_dq_form (mode)
20203 && quad_address_p (XEXP (dest, 0), mode, true))
20204 return "stxv %x1,%0";
20206 else if (TARGET_P9_VECTOR)
20207 return "stxvx %x1,%y0";
20209 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20210 return "stxvw4x %x1,%y0";
20212 else
20213 return "stxvd2x %x1,%y0";
20216 else if (TARGET_ALTIVEC && src_vmx_p)
20217 return "stvx %1,%y0";
20219 else if (src_fp_p)
20220 return "#";
20223 /* Constants. */
20224 else if (dest_regno >= 0
20225 && (CONST_INT_P (src)
20226 || CONST_WIDE_INT_P (src)
20227 || CONST_DOUBLE_P (src)
20228 || GET_CODE (src) == CONST_VECTOR))
20230 if (dest_gpr_p)
20231 return "#";
20233 else if ((dest_vmx_p && TARGET_ALTIVEC)
20234 || (dest_vsx_p && TARGET_VSX))
20235 return output_vec_const_move (operands);
20238 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
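/* For example, a V16QImode copy between two VSX registers comes back as
   "xxlor %x0,%x1,%x1"; a 128-bit GPR-to-GPR copy comes back as "#" and is
   split into word-sized moves after reload; an aligned quad-offset ISA 3.0
   load uses "lxv %x0,%1", with "lxvx" (or the older "lxvw4x"/"lxvd2x")
   covering the remaining vector cases.  */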
20241 /* Validate a 128-bit move. */
20242 bool
20243 rs6000_move_128bit_ok_p (rtx operands[])
20245 machine_mode mode = GET_MODE (operands[0]);
20246 return (gpc_reg_operand (operands[0], mode)
20247 || gpc_reg_operand (operands[1], mode));
20250 /* Return true if a 128-bit move needs to be split. */
20251 bool
20252 rs6000_split_128bit_ok_p (rtx operands[])
20254 if (!reload_completed)
20255 return false;
20257 if (!gpr_or_gpr_p (operands[0], operands[1]))
20258 return false;
20260 if (quad_load_store_p (operands[0], operands[1]))
20261 return false;
20263 return true;
20267 /* Given a comparison operation, return the bit number in CCR to test. We
20268 know this is a valid comparison.
20270 SCC_P is 1 if this is for an scc. That means that %D will have been
20271 used instead of %C, so the bits will be in different places.
20273 Return -1 if OP isn't a valid comparison for some reason. */
20275 int
20276 ccr_bit (rtx op, int scc_p)
20278 enum rtx_code code = GET_CODE (op);
20279 machine_mode cc_mode;
20280 int cc_regnum;
20281 int base_bit;
20282 rtx reg;
20284 if (!COMPARISON_P (op))
20285 return -1;
20287 reg = XEXP (op, 0);
20289 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20290 return -1;
20292 cc_mode = GET_MODE (reg);
20293 cc_regnum = REGNO (reg);
20294 base_bit = 4 * (cc_regnum - CR0_REGNO);
20296 validate_condition_mode (code, cc_mode);
20298 /* When generating a sCOND operation, only positive conditions are
20299 allowed. */
20300 if (scc_p)
20301 switch (code)
20303 case EQ:
20304 case GT:
20305 case LT:
20306 case UNORDERED:
20307 case GTU:
20308 case LTU:
20309 break;
20310 default:
20311 return -1;
20314 switch (code)
20316 case NE:
20317 return scc_p ? base_bit + 3 : base_bit + 2;
20318 case EQ:
20319 return base_bit + 2;
20320 case GT: case GTU: case UNLE:
20321 return base_bit + 1;
20322 case LT: case LTU: case UNGE:
20323 return base_bit;
20324 case ORDERED: case UNORDERED:
20325 return base_bit + 3;
20327 case GE: case GEU:
20328 /* If scc, we will have done a cror to put the bit in the
20329 unordered position. So test that bit. For integer, this is ! LT
20330 unless this is an scc insn. */
20331 return scc_p ? base_bit + 3 : base_bit;
20333 case LE: case LEU:
20334 return scc_p ? base_bit + 3 : base_bit + 1;
20336 default:
20337 return -1;
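/* For example, a (gt ...) comparison sitting in CR field 1 yields base_bit
   4*1 = 4 and returns bit 5; NE without SCC_P returns bit 6 (the EQ bit,
   tested inverted).  For an scc, GE and LE instead return base_bit + 3: the
   scc expansion has already CROR'ed the desired condition into the unordered
   position, so that is the bit to test.  */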
20341 /* Return the GOT register. */
20343 rtx
20344 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20346 /* The second flow pass currently (June 1999) can't update
20347 regs_ever_live without disturbing other parts of the compiler, so
20348 update it here to make the prolog/epilogue code happy. */
20349 if (!can_create_pseudo_p ()
20350 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20351 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20353 crtl->uses_pic_offset_table = 1;
20355 return pic_offset_table_rtx;
20358 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20360 /* Write out a function code label. */
20362 void
20363 rs6000_output_function_entry (FILE *file, const char *fname)
20365 if (fname[0] != '.')
20367 switch (DEFAULT_ABI)
20369 default:
20370 gcc_unreachable ();
20372 case ABI_AIX:
20373 if (DOT_SYMBOLS)
20374 putc ('.', file);
20375 else
20376 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20377 break;
20379 case ABI_ELFv2:
20380 case ABI_V4:
20381 case ABI_DARWIN:
20382 break;
20386 RS6000_OUTPUT_BASENAME (file, fname);
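/* For example, with the AIX ABI and dot symbols, "foo" is emitted as ".foo"
   (the code entry point, distinct from the "foo" function descriptor);
   without dot symbols an internal "L.foo"-style label is emitted instead.
   ELFv2, V4, and Darwin emit the name unchanged.  */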
20389 /* Print an operand. Recognize special options, documented below. */
20391 #if TARGET_ELF
20392 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20393 only introduced by the linker, when applying the sda21
20394 relocation. */
20395 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20396 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20397 #else
20398 #define SMALL_DATA_RELOC "sda21"
20399 #define SMALL_DATA_REG 0
20400 #endif
20402 void
20403 print_operand (FILE *file, rtx x, int code)
20405 int i;
20406 unsigned HOST_WIDE_INT uval;
20408 switch (code)
20410 /* %a is output_address. */
20412 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20413 output_operand. */
20415 case 'D':
20416 /* Like 'J' but get to the GT bit only. */
20417 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20419 output_operand_lossage ("invalid %%D value");
20420 return;
20423 /* Bit 1 is GT bit. */
20424 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20426 /* Add one for shift count in rlinm for scc. */
20427 fprintf (file, "%d", i + 1);
20428 return;
20430 case 'e':
20431 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20432 if (! INT_P (x))
20434 output_operand_lossage ("invalid %%e value");
20435 return;
20438 uval = INTVAL (x);
20439 if ((uval & 0xffff) == 0 && uval != 0)
20440 putc ('s', file);
20441 return;
20443 case 'E':
20444 /* X is a CR register. Print the number of the EQ bit of the CR. */
20445 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20446 output_operand_lossage ("invalid %%E value");
20447 else
20448 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20449 return;
20451 case 'f':
20452 /* X is a CR register. Print the shift count needed to move it
20453 to the high-order four bits. */
20454 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20455 output_operand_lossage ("invalid %%f value");
20456 else
20457 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20458 return;
20460 case 'F':
20461 /* Similar, but print the count for the rotate in the opposite
20462 direction. */
20463 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20464 output_operand_lossage ("invalid %%F value");
20465 else
20466 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20467 return;
20469 case 'G':
20470 /* X is a constant integer. If it is negative, print "m",
20471 otherwise print "z". This is to make an aze or ame insn. */
20472 if (!CONST_INT_P (x))
20473 output_operand_lossage ("invalid %%G value");
20474 else if (INTVAL (x) >= 0)
20475 putc ('z', file);
20476 else
20477 putc ('m', file);
20478 return;
20480 case 'h':
20481 /* If constant, output low-order five bits. Otherwise, write
20482 normally. */
20483 if (INT_P (x))
20484 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20485 else
20486 print_operand (file, x, 0);
20487 return;
20489 case 'H':
20490 /* If constant, output low-order six bits. Otherwise, write
20491 normally. */
20492 if (INT_P (x))
20493 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20494 else
20495 print_operand (file, x, 0);
20496 return;
20498 case 'I':
20499 /* Print `i' if this is a constant, else nothing. */
20500 if (INT_P (x))
20501 putc ('i', file);
20502 return;
20504 case 'j':
20505 /* Write the bit number in CCR for jump. */
20506 i = ccr_bit (x, 0);
20507 if (i == -1)
20508 output_operand_lossage ("invalid %%j code");
20509 else
20510 fprintf (file, "%d", i);
20511 return;
20513 case 'J':
20514 /* Similar, but add one for shift count in rlinm for scc and pass
20515 scc flag to `ccr_bit'. */
20516 i = ccr_bit (x, 1);
20517 if (i == -1)
20518 output_operand_lossage ("invalid %%J code");
20519 else
20520 /* If we want bit 31, write a shift count of zero, not 32. */
20521 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20522 return;
20524 case 'k':
20525 /* X must be a constant. Write the 1's complement of the
20526 constant. */
20527 if (! INT_P (x))
20528 output_operand_lossage ("invalid %%k value");
20529 else
20530 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20531 return;
20533 case 'K':
20534 /* X must be a symbolic constant on ELF. Write an
20535 expression suitable for an 'addi' that adds in the low 16
20536 bits of the MEM. */
20537 if (GET_CODE (x) == CONST)
20539 if (GET_CODE (XEXP (x, 0)) != PLUS
20540 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20541 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20542 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20543 output_operand_lossage ("invalid %%K value");
20545 print_operand_address (file, x);
20546 fputs ("@l", file);
20547 return;
20549 /* %l is output_asm_label. */
20551 case 'L':
20552 /* Write second word of DImode or DFmode reference. Works on register
20553 or non-indexed memory only. */
20554 if (REG_P (x))
20555 fputs (reg_names[REGNO (x) + 1], file);
20556 else if (MEM_P (x))
20558 machine_mode mode = GET_MODE (x);
20559 /* Handle possible auto-increment. Since it is pre-increment and
20560 we have already done it, we can just use an offset of word. */
20561 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20562 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20563 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20564 UNITS_PER_WORD));
20565 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20566 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20567 UNITS_PER_WORD));
20568 else
20569 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20570 UNITS_PER_WORD),
20571 0));
20573 if (small_data_operand (x, GET_MODE (x)))
20574 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20575 reg_names[SMALL_DATA_REG]);
20577 return;
20579 case 'N': /* Unused */
20580 /* Write the number of elements in the vector times 4. */
20581 if (GET_CODE (x) != PARALLEL)
20582 output_operand_lossage ("invalid %%N value");
20583 else
20584 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20585 return;
20587 case 'O': /* Unused */
20588 /* Similar, but subtract 1 first. */
20589 if (GET_CODE (x) != PARALLEL)
20590 output_operand_lossage ("invalid %%O value");
20591 else
20592 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20593 return;
20595 case 'p':
20596 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20597 if (! INT_P (x)
20598 || INTVAL (x) < 0
20599 || (i = exact_log2 (INTVAL (x))) < 0)
20600 output_operand_lossage ("invalid %%p value");
20601 else
20602 fprintf (file, "%d", i);
20603 return;
20605 case 'P':
20606 /* The operand must be an indirect memory reference. The result
20607 is the register name. */
20608 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20609 || REGNO (XEXP (x, 0)) >= 32)
20610 output_operand_lossage ("invalid %%P value");
20611 else
20612 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20613 return;
20615 case 'q':
20616 /* This outputs the logical code corresponding to a boolean
20617 expression. The expression may have one or both operands
20618 negated (if one, only the first one). For condition register
20619 logical operations, it will also treat the negated
20620 CR codes as NOTs, but not handle NOTs of them. */
20622 const char *const *t = 0;
20623 const char *s;
20624 enum rtx_code code = GET_CODE (x);
20625 static const char * const tbl[3][3] = {
20626 { "and", "andc", "nor" },
20627 { "or", "orc", "nand" },
20628 { "xor", "eqv", "xor" } };
20630 if (code == AND)
20631 t = tbl[0];
20632 else if (code == IOR)
20633 t = tbl[1];
20634 else if (code == XOR)
20635 t = tbl[2];
20636 else
20637 output_operand_lossage ("invalid %%q value");
20639 if (GET_CODE (XEXP (x, 0)) != NOT)
20640 s = t[0];
20641 else
20643 if (GET_CODE (XEXP (x, 1)) == NOT)
20644 s = t[2];
20645 else
20646 s = t[1];
20649 fputs (s, file);
20651 return;
20653 case 'Q':
20654 if (! TARGET_MFCRF)
20655 return;
20656 fputc (',', file);
20657 /* FALLTHRU */
20659 case 'R':
20660 /* X is a CR register. Print the mask for `mtcrf'. */
20661 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20662 output_operand_lossage ("invalid %%R value");
20663 else
20664 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20665 return;
20667 case 's':
20668 /* Low 5 bits of 32 - value. */
20669 if (! INT_P (x))
20670 output_operand_lossage ("invalid %%s value");
20671 else
20672 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20673 return;
20675 case 't':
20676 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20677 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20679 output_operand_lossage ("invalid %%t value");
20680 return;
20683 /* Bit 3 is OV bit. */
20684 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20686 /* If we want bit 31, write a shift count of zero, not 32. */
20687 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20688 return;
20690 case 'T':
20691 /* Print the symbolic name of a branch target register. */
20692 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20693 x = XVECEXP (x, 0, 0);
20694 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20695 && REGNO (x) != CTR_REGNO))
20696 output_operand_lossage ("invalid %%T value");
20697 else if (REGNO (x) == LR_REGNO)
20698 fputs ("lr", file);
20699 else
20700 fputs ("ctr", file);
20701 return;
20703 case 'u':
20704 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20705 for use in unsigned operand. */
20706 if (! INT_P (x))
20708 output_operand_lossage ("invalid %%u value");
20709 return;
20712 uval = INTVAL (x);
20713 if ((uval & 0xffff) == 0)
20714 uval >>= 16;
20716 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20717 return;
20719 case 'v':
20720 /* High-order 16 bits of constant for use in signed operand. */
20721 if (! INT_P (x))
20722 output_operand_lossage ("invalid %%v value");
20723 else
20724 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20725 (INTVAL (x) >> 16) & 0xffff);
20726 return;
20728 case 'U':
20729 /* Print `u' if this has an auto-increment or auto-decrement. */
20730 if (MEM_P (x)
20731 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20732 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20733 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20734 putc ('u', file);
20735 return;
20737 case 'V':
20738 /* Print the trap code for this operand. */
20739 switch (GET_CODE (x))
20741 case EQ:
20742 fputs ("eq", file); /* 4 */
20743 break;
20744 case NE:
20745 fputs ("ne", file); /* 24 */
20746 break;
20747 case LT:
20748 fputs ("lt", file); /* 16 */
20749 break;
20750 case LE:
20751 fputs ("le", file); /* 20 */
20752 break;
20753 case GT:
20754 fputs ("gt", file); /* 8 */
20755 break;
20756 case GE:
20757 fputs ("ge", file); /* 12 */
20758 break;
20759 case LTU:
20760 fputs ("llt", file); /* 2 */
20761 break;
20762 case LEU:
20763 fputs ("lle", file); /* 6 */
20764 break;
20765 case GTU:
20766 fputs ("lgt", file); /* 1 */
20767 break;
20768 case GEU:
20769 fputs ("lge", file); /* 5 */
20770 break;
20771 default:
20772 output_operand_lossage ("invalid %%V value");
20774 break;
20776 case 'w':
20777 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20778 normally. */
20779 if (INT_P (x))
20780 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20781 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20782 else
20783 print_operand (file, x, 0);
20784 return;
20786 case 'x':
20787 /* X is a FPR or Altivec register used in a VSX context. */
20788 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20789 output_operand_lossage ("invalid %%x value");
20790 else
20792 int reg = REGNO (x);
20793 int vsx_reg = (FP_REGNO_P (reg)
20794 ? reg - 32
20795 : reg - FIRST_ALTIVEC_REGNO + 32);
20797 #ifdef TARGET_REGNAMES
20798 if (TARGET_REGNAMES)
20799 fprintf (file, "%%vs%d", vsx_reg);
20800 else
20801 #endif
20802 fprintf (file, "%d", vsx_reg);
20804 return;
20806 case 'X':
20807 if (MEM_P (x)
20808 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20809 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20810 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20811 putc ('x', file);
20812 return;
20814 case 'Y':
20815 /* Like 'L', for third word of TImode/PTImode */
20816 if (REG_P (x))
20817 fputs (reg_names[REGNO (x) + 2], file);
20818 else if (MEM_P (x))
20820 machine_mode mode = GET_MODE (x);
20821 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20822 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20823 output_address (mode, plus_constant (Pmode,
20824 XEXP (XEXP (x, 0), 0), 8));
20825 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20826 output_address (mode, plus_constant (Pmode,
20827 XEXP (XEXP (x, 0), 0), 8));
20828 else
20829 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20830 if (small_data_operand (x, GET_MODE (x)))
20831 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20832 reg_names[SMALL_DATA_REG]);
20834 return;
20836 case 'z':
20837 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20838 x = XVECEXP (x, 0, 1);
20839 /* X is a SYMBOL_REF. Write out the name preceded by a
20840 period and without any trailing data in brackets. Used for function
20841 names. If we are configured for System V (or the embedded ABI) on
20842 the PowerPC, do not emit the period, since those systems do not use
20843 TOCs and the like. */
20844 if (!SYMBOL_REF_P (x))
20846 output_operand_lossage ("invalid %%z value");
20847 return;
20850 /* For macho, check to see if we need a stub. */
20851 if (TARGET_MACHO)
20853 const char *name = XSTR (x, 0);
20854 #if TARGET_MACHO
20855 if (darwin_picsymbol_stubs
20856 && MACHOPIC_INDIRECT
20857 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20858 name = machopic_indirection_name (x, /*stub_p=*/true);
20859 #endif
20860 assemble_name (file, name);
20862 else if (!DOT_SYMBOLS)
20863 assemble_name (file, XSTR (x, 0));
20864 else
20865 rs6000_output_function_entry (file, XSTR (x, 0));
20866 return;
20868 case 'Z':
20869 /* Like 'L', for last word of TImode/PTImode. */
20870 if (REG_P (x))
20871 fputs (reg_names[REGNO (x) + 3], file);
20872 else if (MEM_P (x))
20874 machine_mode mode = GET_MODE (x);
20875 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20876 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20877 output_address (mode, plus_constant (Pmode,
20878 XEXP (XEXP (x, 0), 0), 12));
20879 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20880 output_address (mode, plus_constant (Pmode,
20881 XEXP (XEXP (x, 0), 0), 12));
20882 else
20883 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20884 if (small_data_operand (x, GET_MODE (x)))
20885 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20886 reg_names[SMALL_DATA_REG]);
20888 return;
20890 /* Print AltiVec memory operand. */
20891 case 'y':
20893 rtx tmp;
20895 gcc_assert (MEM_P (x));
20897 tmp = XEXP (x, 0);
20899 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20900 && GET_CODE (tmp) == AND
20901 && CONST_INT_P (XEXP (tmp, 1))
20902 && INTVAL (XEXP (tmp, 1)) == -16)
20903 tmp = XEXP (tmp, 0);
20904 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20905 && GET_CODE (tmp) == PRE_MODIFY)
20906 tmp = XEXP (tmp, 1);
20907 if (REG_P (tmp))
20908 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20909 else
20911 if (GET_CODE (tmp) != PLUS
20912 || !REG_P (XEXP (tmp, 0))
20913 || !REG_P (XEXP (tmp, 1)))
20915 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
20916 break;
20919 if (REGNO (XEXP (tmp, 0)) == 0)
20920 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
20921 reg_names[ REGNO (XEXP (tmp, 0)) ]);
20922 else
20923 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
20924 reg_names[ REGNO (XEXP (tmp, 1)) ]);
20926 break;
20929 case 0:
20930 if (REG_P (x))
20931 fprintf (file, "%s", reg_names[REGNO (x)]);
20932 else if (MEM_P (x))
20934 /* We need to handle PRE_INC and PRE_DEC here, since we need to
20935 know the width from the mode. */
20936 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
20937 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
20938 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20939 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
20940 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
20941 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20942 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20943 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
20944 else
20945 output_address (GET_MODE (x), XEXP (x, 0));
20947 else if (toc_relative_expr_p (x, false,
20948 &tocrel_base_oac, &tocrel_offset_oac))
20949 /* This hack along with a corresponding hack in
20950 rs6000_output_addr_const_extra arranges to output addends
20951 where the assembler expects to find them. E.g.
20952 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
20953 without this hack would be output as "x@toc+4". We
20954 want "x+4@toc". */
20955 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
20956 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
20957 output_addr_const (file, XVECEXP (x, 0, 0));
20958 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20959 output_addr_const (file, XVECEXP (x, 0, 1));
20960 else
20961 output_addr_const (file, x);
20962 return;
20964 case '&':
20965 if (const char *name = get_some_local_dynamic_name ())
20966 assemble_name (file, name);
20967 else
20968 output_operand_lossage ("'%%&' used without any "
20969 "local dynamic TLS references");
20970 return;
20972 default:
20973 output_operand_lossage ("invalid %%xn code");
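/* For example, "%L1" on a doubleword register pair starting at r10 prints
   the name of the following register (r11); "%u" on 0x12340000 prints
   0x1234, selecting whichever 16-bit half of the constant is nonzero; "%x1"
   on an FPR or Altivec register prints the corresponding VSX register
   number (0-63).  */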
20977 /* Print the address of an operand. */
20979 void
20980 print_operand_address (FILE *file, rtx x)
20982 if (REG_P (x))
20983 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
20985 /* Is it a pc-relative address? */
20986 else if (pcrel_address (x, Pmode))
20988 HOST_WIDE_INT offset;
20990 if (GET_CODE (x) == CONST)
20991 x = XEXP (x, 0);
20993 if (GET_CODE (x) == PLUS)
20995 offset = INTVAL (XEXP (x, 1));
20996 x = XEXP (x, 0);
20998 else
20999 offset = 0;
21001 output_addr_const (file, x);
21003 if (offset)
21004 fprintf (file, "%+" PRId64, offset);
21006 fputs ("@pcrel", file);
21008 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21009 || GET_CODE (x) == LABEL_REF)
21011 output_addr_const (file, x);
21012 if (small_data_operand (x, GET_MODE (x)))
21013 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21014 reg_names[SMALL_DATA_REG]);
21015 else
21016 gcc_assert (!TARGET_TOC);
21018 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21019 && REG_P (XEXP (x, 1)))
21021 if (REGNO (XEXP (x, 0)) == 0)
21022 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21023 reg_names[ REGNO (XEXP (x, 0)) ]);
21024 else
21025 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21026 reg_names[ REGNO (XEXP (x, 1)) ]);
21028 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21029 && CONST_INT_P (XEXP (x, 1)))
21030 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21031 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21032 #if TARGET_MACHO
21033 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21034 && CONSTANT_P (XEXP (x, 1)))
21036 fprintf (file, "lo16(");
21037 output_addr_const (file, XEXP (x, 1));
21038 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21040 #endif
21041 #if TARGET_ELF
21042 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21043 && CONSTANT_P (XEXP (x, 1)))
21045 output_addr_const (file, XEXP (x, 1));
21046 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21048 #endif
21049 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21051 /* This hack along with a corresponding hack in
21052 rs6000_output_addr_const_extra arranges to output addends
21053 where the assembler expects to find them. E.g.
21054 (lo_sum (reg 9)
21055 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21056 without this hack would be output as "x@toc+8@l(9)". We
21057 want "x+8@toc@l(9)". */
21058 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21059 if (GET_CODE (x) == LO_SUM)
21060 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21061 else
21062 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21064 else
21065 output_addr_const (file, x);
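/* For example, (reg 9) is printed as "0(9)"; (plus (reg 9) (const_int 16))
   as "16(9)"; an indexed (plus (reg 9) (reg 10)) as "9,10", with the
   operands swapped if the first register is r0; and a pc-relative symbol as
   "sym@pcrel", folding any byte offset in as "sym+8@pcrel".  */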
21068 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21070 static bool
21071 rs6000_output_addr_const_extra (FILE *file, rtx x)
21073 if (GET_CODE (x) == UNSPEC)
21074 switch (XINT (x, 1))
21076 case UNSPEC_TOCREL:
21077 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21078 && REG_P (XVECEXP (x, 0, 1))
21079 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21080 output_addr_const (file, XVECEXP (x, 0, 0));
21081 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21083 if (INTVAL (tocrel_offset_oac) >= 0)
21084 fprintf (file, "+");
21085 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21087 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21089 putc ('-', file);
21090 assemble_name (file, toc_label_name);
21091 need_toc_init = 1;
21093 else if (TARGET_ELF)
21094 fputs ("@toc", file);
21095 return true;
21097 #if TARGET_MACHO
21098 case UNSPEC_MACHOPIC_OFFSET:
21099 output_addr_const (file, XVECEXP (x, 0, 0));
21100 putc ('-', file);
21101 machopic_output_function_base_name (file);
21102 return true;
21103 #endif
21105 return false;
21108 /* Target hook for assembling integer objects. The PowerPC version has
21109 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21110 is defined. It also needs to handle DI-mode objects on 64-bit
21111 targets. */
21113 static bool
21114 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21116 #ifdef RELOCATABLE_NEEDS_FIXUP
21117 /* Special handling for SI values. */
21118 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21120 static int recurse = 0;
21122 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21123 the .fixup section. Since the TOC section is already relocated, we
21124 don't need to mark it here. We used to skip the text section, but it
21125 should never be valid for relocated addresses to be placed in the text
21126 section. */
21127 if (DEFAULT_ABI == ABI_V4
21128 && (TARGET_RELOCATABLE || flag_pic > 1)
21129 && in_section != toc_section
21130 && !recurse
21131 && !CONST_SCALAR_INT_P (x)
21132 && CONSTANT_P (x))
21134 char buf[256];
21136 recurse = 1;
21137 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21138 fixuplabelno++;
21139 ASM_OUTPUT_LABEL (asm_out_file, buf);
21140 fprintf (asm_out_file, "\t.long\t(");
21141 output_addr_const (asm_out_file, x);
21142 fprintf (asm_out_file, ")@fixup\n");
21143 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21144 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21145 fprintf (asm_out_file, "\t.long\t");
21146 assemble_name (asm_out_file, buf);
21147 fprintf (asm_out_file, "\n\t.previous\n");
21148 recurse = 0;
21149 return true;
21151 /* Remove initial .'s to turn a -mcall-aixdesc function
21152 address into the address of the descriptor, not the function
21153 itself. */
21154 else if (SYMBOL_REF_P (x)
21155 && XSTR (x, 0)[0] == '.'
21156 && DEFAULT_ABI == ABI_AIX)
21158 const char *name = XSTR (x, 0);
21159 while (*name == '.')
21160 name++;
21162 fprintf (asm_out_file, "\t.long\t%s\n", name);
21163 return true;
21166 #endif /* RELOCATABLE_NEEDS_FIXUP */
21167 return default_assemble_integer (x, size, aligned_p);
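/* For -mrelocatable, the fixup path above emits assembly along these lines
   for a 32-bit address-valued initializer:

	.LCP1:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP1
		.previous

   recording where the word lives so that startup code can relocate it.  */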
21170 /* Return a template string for assembly to emit when making an
21171 external call. FUNOP is the call mem argument operand number. */
21173 static const char *
21174 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21176 /* -Wformat-overflow workaround, without which gcc thinks that %u
21177 might produce 10 digits. */
21178 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21180 char arg[12];
21181 arg[0] = 0;
21182 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21184 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21185 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21186 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21187 sprintf (arg, "(%%&@tlsld)");
21188 else
21189 gcc_unreachable ();
21192 /* The magic 32768 offset here corresponds to the offset of
21193 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21194 char z[11];
21195 sprintf (z, "%%z%u%s", funop,
21196 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21197 ? "+32768" : ""));
21199 static char str[32]; /* 1 spare */
21200 if (rs6000_pcrel_p (cfun))
21201 sprintf (str, "b%s %s@notoc%s", sibcall ? "" : "l", z, arg);
21202 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21203 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21204 sibcall ? "" : "\n\tnop");
21205 else if (DEFAULT_ABI == ABI_V4)
21206 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21207 flag_pic ? "@plt" : "");
21208 #if TARGET_MACHO
21209 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21210 else if (DEFAULT_ABI == ABI_DARWIN)
21212 /* The cookie is in operand func+2. */
21213 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21214 int cookie = INTVAL (operands[funop + 2]);
21215 if (cookie & CALL_LONG)
21217 tree funname = get_identifier (XSTR (operands[funop], 0));
21218 tree labelname = get_prev_label (funname);
21219 gcc_checking_assert (labelname && !sibcall);
21221 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21222 instruction will reach 'foo', otherwise link as 'bl L42'".
21223 "L42" should be a 'branch island', that will do a far jump to
21224 'foo'. Branch islands are generated in
21225 macho_branch_islands(). */
21226 sprintf (str, "jbsr %%z%u,%.10s", funop,
21227 IDENTIFIER_POINTER (labelname));
21229 else
21230 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21231 after the call. */
21232 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21234 #endif
21235 else
21236 gcc_unreachable ();
21237 return str;
21240 const char *
21241 rs6000_call_template (rtx *operands, unsigned int funop)
21243 return rs6000_call_template_1 (operands, funop, false);
21246 const char *
21247 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21249 return rs6000_call_template_1 (operands, funop, true);
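/* For example, with funop == 1 the AIX and ELFv2 ABIs produce
   "bl %z1\n\tnop" (the nop is the slot the linker may patch to restore the
   TOC pointer), ABI_V4 with -fPIC produces "bl %z1@plt", and a pc-relative
   callee produces "bl %z1@notoc".  Sibcalls use "b" instead of "bl" and
   omit the nop.  */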
21252 /* As above, for indirect calls. */
21254 static const char *
21255 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21256 bool sibcall)
21258 /* -Wformat-overflow workaround, without which gcc thinks that %u
21259 might produce 10 digits. Note that -Wformat-overflow will not
21260 currently warn here for str[], so do not rely on a warning to
21261 ensure str[] is correctly sized. */
21262 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21264 /* Currently, funop is either 0 or 1. The maximum string is always
21265 a !speculate 64-bit __tls_get_addr call.
21267 ABI_ELFv2, pcrel:
21268 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21269 . 35 .reloc .,R_PPC64_PLTSEQ_NOTOC,%z1\n\t
21270 . 9 crset 2\n\t
21271 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21272 . 36 .reloc .,R_PPC64_PLTCALL_NOTOC,%z1\n\t
21273 . 8 beq%T1l-
21274 .---
21275 .142
21277 ABI_AIX:
21278 . 9 ld 2,%3\n\t
21279 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21280 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21281 . 9 crset 2\n\t
21282 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21283 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21284 . 10 beq%T1l-\n\t
21285 . 10 ld 2,%4(1)
21286 .---
21287 .151
21289 ABI_ELFv2:
21290 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21291 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21292 . 9 crset 2\n\t
21293 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21294 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21295 . 10 beq%T1l-\n\t
21296 . 10 ld 2,%3(1)
21297 .---
21298 .142
21300 ABI_V4:
21301 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21302 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21303 . 9 crset 2\n\t
21304 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21305 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21306 . 8 beq%T1l-
21307 .---
21308 .141 */
21309 static char str[160]; /* 8 spare */
21310 char *s = str;
21311 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21313 if (DEFAULT_ABI == ABI_AIX)
21314 s += sprintf (s,
21315 "l%s 2,%%%u\n\t",
21316 ptrload, funop + 2);
21318 /* We don't need the extra code to stop indirect call speculation if
21319 calling via LR. */
21320 bool speculate = (TARGET_MACHO
21321 || rs6000_speculate_indirect_jumps
21322 || (REG_P (operands[funop])
21323 && REGNO (operands[funop]) == LR_REGNO));
21325 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21327 const char *rel64 = TARGET_64BIT ? "64" : "";
21328 char tls[29];
21329 tls[0] = 0;
21330 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21332 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21333 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21334 rel64, funop + 1);
21335 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21336 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21337 rel64);
21338 else
21339 gcc_unreachable ();
21342 const char *notoc = rs6000_pcrel_p (cfun) ? "_NOTOC" : "";
21343 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21344 && flag_pic == 2 ? "+32768" : "");
21345 if (!speculate)
21347 s += sprintf (s,
21348 "%s.reloc .,R_PPC%s_PLTSEQ%s,%%z%u%s\n\t",
21349 tls, rel64, notoc, funop, addend);
21350 s += sprintf (s, "crset 2\n\t");
21352 s += sprintf (s,
21353 "%s.reloc .,R_PPC%s_PLTCALL%s,%%z%u%s\n\t",
21354 tls, rel64, notoc, funop, addend);
21356 else if (!speculate)
21357 s += sprintf (s, "crset 2\n\t");
21359 if (rs6000_pcrel_p (cfun))
21361 if (speculate)
21362 sprintf (s, "b%%T%ul", funop);
21363 else
21364 sprintf (s, "beq%%T%ul-", funop);
21366 else if (DEFAULT_ABI == ABI_AIX)
21368 if (speculate)
21369 sprintf (s,
21370 "b%%T%ul\n\t"
21371 "l%s 2,%%%u(1)",
21372 funop, ptrload, funop + 3);
21373 else
21374 sprintf (s,
21375 "beq%%T%ul-\n\t"
21376 "l%s 2,%%%u(1)",
21377 funop, ptrload, funop + 3);
21379 else if (DEFAULT_ABI == ABI_ELFv2)
21381 if (speculate)
21382 sprintf (s,
21383 "b%%T%ul\n\t"
21384 "l%s 2,%%%u(1)",
21385 funop, ptrload, funop + 2);
21386 else
21387 sprintf (s,
21388 "beq%%T%ul-\n\t"
21389 "l%s 2,%%%u(1)",
21390 funop, ptrload, funop + 2);
21392 else
21394 if (speculate)
21395 sprintf (s,
21396 "b%%T%u%s",
21397 funop, sibcall ? "" : "l");
21398 else
21399 sprintf (s,
21400 "beq%%T%u%s-%s",
21401 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21403 return str;
21406 const char *
21407 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21409 return rs6000_indirect_call_template_1 (operands, funop, false);
21412 const char *
21413 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21415 return rs6000_indirect_call_template_1 (operands, funop, true);
21418 #if HAVE_AS_PLTSEQ
21419 /* Output indirect call insns. WHICH identifies the type of sequence. */
21420 const char *
21421 rs6000_pltseq_template (rtx *operands, int which)
21423 const char *rel64 = TARGET_64BIT ? "64" : "";
21424 char tls[30];
21425 tls[0] = 0;
21426 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21428 char off = which == RS6000_PLTSEQ_PLT_PCREL34 ? '8' : '4';
21429 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21430 sprintf (tls, ".reloc .-%c,R_PPC%s_TLSGD,%%3\n\t",
21431 off, rel64);
21432 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21433 sprintf (tls, ".reloc .-%c,R_PPC%s_TLSLD,%%&\n\t",
21434 off, rel64);
21435 else
21436 gcc_unreachable ();
21439 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21440 static char str[96]; /* 10 spare */
21441 char off = WORDS_BIG_ENDIAN ? '2' : '4';
21442 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21443 && flag_pic == 2 ? "+32768" : "");
21444 switch (which)
21446 case RS6000_PLTSEQ_TOCSAVE:
21447 sprintf (str,
21448 "st%s\n\t"
21449 "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2",
21450 TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)",
21451 tls, rel64);
21452 break;
21453 case RS6000_PLTSEQ_PLT16_HA:
21454 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21455 sprintf (str,
21456 "lis %%0,0\n\t"
21457 "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2",
21458 tls, off, rel64);
21459 else
21460 sprintf (str,
21461 "addis %%0,%%1,0\n\t"
21462 "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2%s",
21463 tls, off, rel64, addend);
21464 break;
21465 case RS6000_PLTSEQ_PLT16_LO:
21466 sprintf (str,
21467 "l%s %%0,0(%%1)\n\t"
21468 "%s.reloc .-%c,R_PPC%s_PLT16_LO%s,%%z2%s",
21469 TARGET_64BIT ? "d" : "wz",
21470 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend);
21471 break;
21472 case RS6000_PLTSEQ_MTCTR:
21473 sprintf (str,
21474 "mtctr %%1\n\t"
21475 "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2%s",
21476 tls, rel64, addend);
21477 break;
21478 case RS6000_PLTSEQ_PLT_PCREL34:
21479 sprintf (str,
21480 "pl%s %%0,0(0),1\n\t"
21481 "%s.reloc .-8,R_PPC%s_PLT_PCREL34_NOTOC,%%z2",
21482 TARGET_64BIT ? "d" : "wz",
21483 tls, rel64);
21484 break;
21485 default:
21486 gcc_unreachable ();
21488 return str;
21490 #endif
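/* For illustration only (register numbers and the exact .reloc offsets
   depend on register allocation, endianness and ABI): on 32-bit SysV with
   -msecure-plt and -fPIC, the inline PLT sequence for a call to "foo"
   comes out roughly as

	addis 9,30,0
	 .reloc .-2,R_PPC_PLT16_HA,foo+32768
	lwz 9,0(9)
	 .reloc .-2,R_PPC_PLT16_LO,foo+32768
	mtctr 9
	 .reloc .-4,R_PPC_PLTSEQ,foo+32768
	bctrl
	 .reloc .,R_PPC_PLTCALL,foo+32768

   which the linker may relax into a direct call when "foo" turns out to
   be locally defined.  */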
21492 /* Helper function to return whether a MODE can do prefixed loads/stores.
21493 VOIDmode is used when we are loading the pc-relative address into a base
21494 register, but we are not using it as part of a memory operation. As modes
21495 add support for prefixed memory, they will be added here. */
21497 static bool
21498 mode_supports_prefixed_address_p (machine_mode mode)
21500 return mode == VOIDmode;
21503 /* Function to return true if ADDR is a valid prefixed memory address that uses
21504 mode MODE. */
21506 bool
21507 rs6000_prefixed_address_mode_p (rtx addr, machine_mode mode)
21509 if (!TARGET_PREFIXED_ADDR || !mode_supports_prefixed_address_p (mode))
21510 return false;
21512 /* Check for PC-relative addresses. */
21513 if (pcrel_address (addr, Pmode))
21514 return true;
21516 /* Check for prefixed memory addresses that have a large numeric offset,
21517 or an offset that can't be used for a DS/DQ-form memory operation. */
21518 if (GET_CODE (addr) == PLUS)
21520 rtx op0 = XEXP (addr, 0);
21521 rtx op1 = XEXP (addr, 1);
21523 if (!base_reg_operand (op0, Pmode) || !CONST_INT_P (op1))
21524 return false;
21526 HOST_WIDE_INT value = INTVAL (op1);
21527 if (!SIGNED_34BIT_OFFSET_P (value))
21528 return false;
21530 /* Offset larger than 16 bits? */
21531 if (!SIGNED_16BIT_OFFSET_P (value))
21532 return true;
21534 /* DQ instruction (bottom 4 bits must be 0) for vectors. */
21535 HOST_WIDE_INT mask;
21536 if (GET_MODE_SIZE (mode) >= 16)
21537 mask = 15;
21539 /* DS instruction (bottom 2 bits must be 0). For 32-bit integers, we
21540 need to use DS instructions if we are sign-extending the value with
21541 LWA. For 32-bit floating point, we need DS instructions to load and
21542 store values to the traditional Altivec registers. */
21543 else if (GET_MODE_SIZE (mode) >= 4)
21544 mask = 3;
21546 /* QImode/HImode have no restrictions. */
21547 else
21548 return true;
21550 /* Return true if we must use a prefixed instruction. */
21551 return (value & mask) != 0;
21554 return false;
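/* Worked examples for the offset checks above, assuming a mode with
   DS-form restrictions (low 2 bits of the offset must be zero, e.g.
   DImode) were enabled in mode_supports_prefixed_address_p:

     reg + 0x7ffc	-> false (fits 16 bits and is 4-byte aligned)
     reg + 0x7ffe	-> true  (fits 16 bits but fails the DS alignment)
     reg + 0x12345	-> true  (does not fit in 16 bits)
     reg + 0x3ffffffff	-> false (does not even fit in 34 bits)  */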
21557 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21558 /* Emit an assembler directive to set symbol visibility for DECL to
21559 VISIBILITY_TYPE. */
21561 static void
21562 rs6000_assemble_visibility (tree decl, int vis)
21564 if (TARGET_XCOFF)
21565 return;
21567 /* Functions need to have their entry point symbol visibility set as
21568 well as their descriptor symbol visibility. */
21569 if (DEFAULT_ABI == ABI_AIX
21570 && DOT_SYMBOLS
21571 && TREE_CODE (decl) == FUNCTION_DECL)
21573 static const char * const visibility_types[] = {
21574 NULL, "protected", "hidden", "internal"
21577 const char *name, *type;
21579 name = ((* targetm.strip_name_encoding)
21580 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21581 type = visibility_types[vis];
21583 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21584 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21586 else
21587 default_assemble_visibility (decl, vis);
21589 #endif
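/* For example, a function "foo" declared with
   __attribute__ ((visibility ("hidden"))) on a dot-symbol AIX-ABI target
   (such as 64-bit ELFv1 Linux) gets both directives:

	.hidden foo
	.hidden .foo

   covering the descriptor symbol and the code entry symbol.  */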
21591 enum rtx_code
21592 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21594 /* Reversal of FP compares needs care -- an ordered compare
21595 becomes an unordered compare and vice versa. */
21596 if (mode == CCFPmode
21597 && (!flag_finite_math_only
21598 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21599 || code == UNEQ || code == LTGT))
21600 return reverse_condition_maybe_unordered (code);
21601 else
21602 return reverse_condition (code);
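/* For instance, reversing GE in CCFPmode without -ffinite-math-only
   yields UNLT, since "not (a >= b)" must also hold when either operand
   is a NaN; for integer CCmode the plain reversal to LT suffices.  */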
21605 /* Generate a compare for CODE. Return a brand-new rtx that
21606 represents the result of the compare. */
21608 static rtx
21609 rs6000_generate_compare (rtx cmp, machine_mode mode)
21611 machine_mode comp_mode;
21612 rtx compare_result;
21613 enum rtx_code code = GET_CODE (cmp);
21614 rtx op0 = XEXP (cmp, 0);
21615 rtx op1 = XEXP (cmp, 1);
21617 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21618 comp_mode = CCmode;
21619 else if (FLOAT_MODE_P (mode))
21620 comp_mode = CCFPmode;
21621 else if (code == GTU || code == LTU
21622 || code == GEU || code == LEU)
21623 comp_mode = CCUNSmode;
21624 else if ((code == EQ || code == NE)
21625 && unsigned_reg_p (op0)
21626 && (unsigned_reg_p (op1)
21627 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21628 /* These are unsigned values; perhaps there will be a later
21629 ordering compare that can be shared with this one. */
21630 comp_mode = CCUNSmode;
21631 else
21632 comp_mode = CCmode;
21634 /* If we have an unsigned compare, make sure we don't have a signed value as
21635 an immediate. */
21636 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21637 && INTVAL (op1) < 0)
21639 op0 = copy_rtx_if_shared (op0);
21640 op1 = force_reg (GET_MODE (op0), op1);
21641 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21644 /* First, the compare. */
21645 compare_result = gen_reg_rtx (comp_mode);
21647 /* IEEE 128-bit support in VSX registers when we do not have hardware
21648 support. */
21649 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21651 rtx libfunc = NULL_RTX;
21652 bool check_nan = false;
21653 rtx dest;
21655 switch (code)
21657 case EQ:
21658 case NE:
21659 libfunc = optab_libfunc (eq_optab, mode);
21660 break;
21662 case GT:
21663 case GE:
21664 libfunc = optab_libfunc (ge_optab, mode);
21665 break;
21667 case LT:
21668 case LE:
21669 libfunc = optab_libfunc (le_optab, mode);
21670 break;
21672 case UNORDERED:
21673 case ORDERED:
21674 libfunc = optab_libfunc (unord_optab, mode);
21675 code = (code == UNORDERED) ? NE : EQ;
21676 break;
21678 case UNGE:
21679 case UNGT:
21680 check_nan = true;
21681 libfunc = optab_libfunc (ge_optab, mode);
21682 code = (code == UNGE) ? GE : GT;
21683 break;
21685 case UNLE:
21686 case UNLT:
21687 check_nan = true;
21688 libfunc = optab_libfunc (le_optab, mode);
21689 code = (code == UNLE) ? LE : LT;
21690 break;
21692 case UNEQ:
21693 case LTGT:
21694 check_nan = true;
21695 libfunc = optab_libfunc (eq_optab, mode);
21696 code = (code == UNEQ) ? EQ : NE;
21697 break;
21699 default:
21700 gcc_unreachable ();
21703 gcc_assert (libfunc);
21705 if (!check_nan)
21706 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21707 SImode, op0, mode, op1, mode);
21709 /* The library signals an exception for signalling NaNs, so we need to
21710 handle isgreater, etc. by first checking isordered. */
21711 else
21713 rtx ne_rtx, normal_dest, unord_dest;
21714 rtx unord_func = optab_libfunc (unord_optab, mode);
21715 rtx join_label = gen_label_rtx ();
21716 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21717 rtx unord_cmp = gen_reg_rtx (comp_mode);
21720 /* Test for either value being a NaN. */
21721 gcc_assert (unord_func);
21722 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21723 SImode, op0, mode, op1, mode);
21725 /* Set value (1) if either value is a NaN, and jump to the join
21726 label. */
21727 dest = gen_reg_rtx (SImode);
21728 emit_move_insn (dest, const1_rtx);
21729 emit_insn (gen_rtx_SET (unord_cmp,
21730 gen_rtx_COMPARE (comp_mode, unord_dest,
21731 const0_rtx)));
21733 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21734 emit_jump_insn (gen_rtx_SET (pc_rtx,
21735 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21736 join_ref,
21737 pc_rtx)));
21739 /* Do the normal comparison, knowing that the values are not
21740 NaNs. */
21741 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21742 SImode, op0, mode, op1, mode);
21744 emit_insn (gen_cstoresi4 (dest,
21745 gen_rtx_fmt_ee (code, SImode, normal_dest,
21746 const0_rtx),
21747 normal_dest, const0_rtx));
21749 /* Join NaN and non-NaN paths. Compare dest against 0. */
21750 emit_label (join_label);
21751 code = NE;
21754 emit_insn (gen_rtx_SET (compare_result,
21755 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21758 else
21760 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21761 CLOBBERs to match cmptf_internal2 pattern. */
21762 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21763 && FLOAT128_IBM_P (GET_MODE (op0))
21764 && TARGET_HARD_FLOAT)
21765 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21766 gen_rtvec (10,
21767 gen_rtx_SET (compare_result,
21768 gen_rtx_COMPARE (comp_mode, op0, op1)),
21769 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21770 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21771 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21772 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21773 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21774 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21775 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21776 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21777 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21778 else if (GET_CODE (op1) == UNSPEC
21779 && XINT (op1, 1) == UNSPEC_SP_TEST)
21781 rtx op1b = XVECEXP (op1, 0, 0);
21782 comp_mode = CCEQmode;
21783 compare_result = gen_reg_rtx (CCEQmode);
21784 if (TARGET_64BIT)
21785 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21786 else
21787 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21789 else
21790 emit_insn (gen_rtx_SET (compare_result,
21791 gen_rtx_COMPARE (comp_mode, op0, op1)));
21794 /* Some kinds of FP comparisons need an OR operation;
21795 under flag_finite_math_only we don't bother. */
21796 if (FLOAT_MODE_P (mode)
21797 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21798 && !flag_finite_math_only
21799 && (code == LE || code == GE
21800 || code == UNEQ || code == LTGT
21801 || code == UNGT || code == UNLT))
21803 enum rtx_code or1, or2;
21804 rtx or1_rtx, or2_rtx, compare2_rtx;
21805 rtx or_result = gen_reg_rtx (CCEQmode);
21807 switch (code)
21809 case LE: or1 = LT; or2 = EQ; break;
21810 case GE: or1 = GT; or2 = EQ; break;
21811 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21812 case LTGT: or1 = LT; or2 = GT; break;
21813 case UNGT: or1 = UNORDERED; or2 = GT; break;
21814 case UNLT: or1 = UNORDERED; or2 = LT; break;
21815 default: gcc_unreachable ();
21817 validate_condition_mode (or1, comp_mode);
21818 validate_condition_mode (or2, comp_mode);
21819 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21820 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21821 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21822 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21823 const_true_rtx);
21824 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21826 compare_result = or_result;
21827 code = EQ;
21830 validate_condition_mode (code, GET_MODE (compare_result));
21832 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
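/* As an example of the IOR lowering above: an FP LE when NaNs matter is
   split into LT and EQ on the same compare result and combined into a
   CCEQ value (matched by a cror-style pattern), so the final user is a
   single beq on the combined condition bit.  */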
21836 /* Return the diagnostic message string if the binary operation OP is
21837 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21839 static const char*
21840 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21841 const_tree type1,
21842 const_tree type2)
21844 machine_mode mode1 = TYPE_MODE (type1);
21845 machine_mode mode2 = TYPE_MODE (type2);
21847 /* For complex modes, use the inner type. */
21848 if (COMPLEX_MODE_P (mode1))
21849 mode1 = GET_MODE_INNER (mode1);
21851 if (COMPLEX_MODE_P (mode2))
21852 mode2 = GET_MODE_INNER (mode2);
21854 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21855 double to intermix unless -mfloat128-convert. */
21856 if (mode1 == mode2)
21857 return NULL;
21859 if (!TARGET_FLOAT128_CVT)
21861 if ((mode1 == KFmode && mode2 == IFmode)
21862 || (mode1 == IFmode && mode2 == KFmode))
21863 return N_("__float128 and __ibm128 cannot be used in the same "
21864 "expression");
21866 if (TARGET_IEEEQUAD
21867 && ((mode1 == IFmode && mode2 == TFmode)
21868 || (mode1 == TFmode && mode2 == IFmode)))
21869 return N_("__ibm128 and long double cannot be used in the same "
21870 "expression");
21872 if (!TARGET_IEEEQUAD
21873 && ((mode1 == KFmode && mode2 == TFmode)
21874 || (mode1 == TFmode && mode2 == KFmode)))
21875 return N_("__float128 and long double cannot be used in the same "
21876 "expression");
21879 return NULL;
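/* For example, without -mfloat128-convert,

	__float128 f128;
	__ibm128 i128;
	... f128 + i128 ...

   is rejected with the "__float128 and __ibm128 cannot be used in the
   same expression" message above, since the two formats would otherwise
   be silently mixed.  */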
21883 /* Expand floating point conversion to/from __float128 and __ibm128. */
21885 void
21886 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21888 machine_mode dest_mode = GET_MODE (dest);
21889 machine_mode src_mode = GET_MODE (src);
21890 convert_optab cvt = unknown_optab;
21891 bool do_move = false;
21892 rtx libfunc = NULL_RTX;
21893 rtx dest2;
21894 typedef rtx (*rtx_2func_t) (rtx, rtx);
21895 rtx_2func_t hw_convert = (rtx_2func_t)0;
21896 size_t kf_or_tf;
21898 struct hw_conv_t {
21899 rtx_2func_t from_df;
21900 rtx_2func_t from_sf;
21901 rtx_2func_t from_si_sign;
21902 rtx_2func_t from_si_uns;
21903 rtx_2func_t from_di_sign;
21904 rtx_2func_t from_di_uns;
21905 rtx_2func_t to_df;
21906 rtx_2func_t to_sf;
21907 rtx_2func_t to_si_sign;
21908 rtx_2func_t to_si_uns;
21909 rtx_2func_t to_di_sign;
21910 rtx_2func_t to_di_uns;
21911 } hw_conversions[2] = {
21912 /* conversions to/from KFmode */
21914 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21915 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21916 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21917 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21918 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21919 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21920 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21921 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21922 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21923 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21924 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21925 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21928 /* conversions to/from TFmode */
21930 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21931 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21932 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21933 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21934 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21935 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21936 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21937 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21938 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21939 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21940 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21941 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21945 if (dest_mode == src_mode)
21946 gcc_unreachable ();
21948 /* Eliminate memory operations. */
21949 if (MEM_P (src))
21950 src = force_reg (src_mode, src);
21952 if (MEM_P (dest))
21954 rtx tmp = gen_reg_rtx (dest_mode);
21955 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21956 rs6000_emit_move (dest, tmp, dest_mode);
21957 return;
21960 /* Convert to IEEE 128-bit floating point. */
21961 if (FLOAT128_IEEE_P (dest_mode))
21963 if (dest_mode == KFmode)
21964 kf_or_tf = 0;
21965 else if (dest_mode == TFmode)
21966 kf_or_tf = 1;
21967 else
21968 gcc_unreachable ();
21970 switch (src_mode)
21972 case E_DFmode:
21973 cvt = sext_optab;
21974 hw_convert = hw_conversions[kf_or_tf].from_df;
21975 break;
21977 case E_SFmode:
21978 cvt = sext_optab;
21979 hw_convert = hw_conversions[kf_or_tf].from_sf;
21980 break;
21982 case E_KFmode:
21983 case E_IFmode:
21984 case E_TFmode:
21985 if (FLOAT128_IBM_P (src_mode))
21986 cvt = sext_optab;
21987 else
21988 do_move = true;
21989 break;
21991 case E_SImode:
21992 if (unsigned_p)
21994 cvt = ufloat_optab;
21995 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21997 else
21999 cvt = sfloat_optab;
22000 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22002 break;
22004 case E_DImode:
22005 if (unsigned_p)
22007 cvt = ufloat_optab;
22008 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22010 else
22012 cvt = sfloat_optab;
22013 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22015 break;
22017 default:
22018 gcc_unreachable ();
22022 /* Convert from IEEE 128-bit floating point. */
22023 else if (FLOAT128_IEEE_P (src_mode))
22025 if (src_mode == KFmode)
22026 kf_or_tf = 0;
22027 else if (src_mode == TFmode)
22028 kf_or_tf = 1;
22029 else
22030 gcc_unreachable ();
22032 switch (dest_mode)
22034 case E_DFmode:
22035 cvt = trunc_optab;
22036 hw_convert = hw_conversions[kf_or_tf].to_df;
22037 break;
22039 case E_SFmode:
22040 cvt = trunc_optab;
22041 hw_convert = hw_conversions[kf_or_tf].to_sf;
22042 break;
22044 case E_KFmode:
22045 case E_IFmode:
22046 case E_TFmode:
22047 if (FLOAT128_IBM_P (dest_mode))
22048 cvt = trunc_optab;
22049 else
22050 do_move = true;
22051 break;
22053 case E_SImode:
22054 if (unsigned_p)
22056 cvt = ufix_optab;
22057 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22059 else
22061 cvt = sfix_optab;
22062 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22064 break;
22066 case E_DImode:
22067 if (unsigned_p)
22069 cvt = ufix_optab;
22070 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22072 else
22074 cvt = sfix_optab;
22075 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22077 break;
22079 default:
22080 gcc_unreachable ();
22084 /* Both IBM format. */
22085 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22086 do_move = true;
22088 else
22089 gcc_unreachable ();
22091 /* Handle conversion between TFmode/KFmode/IFmode. */
22092 if (do_move)
22093 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22095 /* Handle conversion if we have hardware support. */
22096 else if (TARGET_FLOAT128_HW && hw_convert)
22097 emit_insn ((hw_convert) (dest, src));
22099 /* Call an external function to do the conversion. */
22100 else if (cvt != unknown_optab)
22102 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22103 gcc_assert (libfunc != NULL_RTX);
22105 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22106 src, src_mode);
22108 gcc_assert (dest2 != NULL_RTX);
22109 if (!rtx_equal_p (dest, dest2))
22110 emit_move_insn (dest, dest2);
22113 else
22114 gcc_unreachable ();
22116 return;
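/* To illustrate the dispatch above: a DFmode to KFmode conversion picks
   hw_conversions[0].from_df, so with -mfloat128-hardware a single
   gen_extenddfkf2_hw insn (an xscvdpqp-style instruction) is emitted;
   without hardware support the sext_optab libcall (__extenddfkf2 in
   libgcc) is called instead.  */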
22120 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22121 can be used as that dest register. Return the dest register. */
22124 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22126 if (op2 == const0_rtx)
22127 return op1;
22129 if (GET_CODE (scratch) == SCRATCH)
22130 scratch = gen_reg_rtx (mode);
22132 if (logical_operand (op2, mode))
22133 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22134 else
22135 emit_insn (gen_rtx_SET (scratch,
22136 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22138 return scratch;
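/* E.g. testing (r == 0x4000): the constant is a logical_operand, so a
   single "xori scratch,r,0x4000" leaves SCRATCH zero exactly on
   equality; testing (r == -5), where the constant is not a
   logical_operand, uses "addi scratch,r,5" instead.  */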
22141 void
22142 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22144 rtx condition_rtx;
22145 machine_mode op_mode;
22146 enum rtx_code cond_code;
22147 rtx result = operands[0];
22149 condition_rtx = rs6000_generate_compare (operands[1], mode);
22150 cond_code = GET_CODE (condition_rtx);
22152 if (cond_code == NE
22153 || cond_code == GE || cond_code == LE
22154 || cond_code == GEU || cond_code == LEU
22155 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22157 rtx not_result = gen_reg_rtx (CCEQmode);
22158 rtx not_op, rev_cond_rtx;
22159 machine_mode cc_mode;
22161 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22163 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22164 SImode, XEXP (condition_rtx, 0), const0_rtx);
22165 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22166 emit_insn (gen_rtx_SET (not_result, not_op));
22167 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22170 op_mode = GET_MODE (XEXP (operands[1], 0));
22171 if (op_mode == VOIDmode)
22172 op_mode = GET_MODE (XEXP (operands[1], 1));
22174 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22176 PUT_MODE (condition_rtx, DImode);
22177 convert_move (result, condition_rtx, 0);
22179 else
22181 PUT_MODE (condition_rtx, SImode);
22182 emit_insn (gen_rtx_SET (result, condition_rtx));
22186 /* Emit a conditional branch to the label in OPERANDS[3], testing the comparison in OPERANDS[0]. */
22188 void
22189 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22191 rtx condition_rtx, loc_ref;
22193 condition_rtx = rs6000_generate_compare (operands[0], mode);
22194 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22195 emit_jump_insn (gen_rtx_SET (pc_rtx,
22196 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22197 loc_ref, pc_rtx)));
22200 /* Return the string to output a conditional branch to LABEL, which is
22201 the operand template of the label, or NULL if the branch is really a
22202 conditional return.
22204 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22205 condition code register and its mode specifies what kind of
22206 comparison we made.
22208 REVERSED is nonzero if we should reverse the sense of the comparison.
22210 INSN is the insn. */
22212 char *
22213 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22215 static char string[64];
22216 enum rtx_code code = GET_CODE (op);
22217 rtx cc_reg = XEXP (op, 0);
22218 machine_mode mode = GET_MODE (cc_reg);
22219 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22220 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22221 int really_reversed = reversed ^ need_longbranch;
22222 char *s = string;
22223 const char *ccode;
22224 const char *pred;
22225 rtx note;
22227 validate_condition_mode (code, mode);
22229 /* Work out which way this really branches. We could use
22230 reverse_condition_maybe_unordered here always but this
22231 makes the resulting assembler clearer. */
22232 if (really_reversed)
22234 /* Reversal of FP compares needs care -- an ordered compare
22235 becomes an unordered compare and vice versa. */
22236 if (mode == CCFPmode)
22237 code = reverse_condition_maybe_unordered (code);
22238 else
22239 code = reverse_condition (code);
22242 switch (code)
22244 /* Not all of these are actually distinct opcodes, but
22245 we distinguish them for clarity of the resulting assembler. */
22246 case NE: case LTGT:
22247 ccode = "ne"; break;
22248 case EQ: case UNEQ:
22249 ccode = "eq"; break;
22250 case GE: case GEU:
22251 ccode = "ge"; break;
22252 case GT: case GTU: case UNGT:
22253 ccode = "gt"; break;
22254 case LE: case LEU:
22255 ccode = "le"; break;
22256 case LT: case LTU: case UNLT:
22257 ccode = "lt"; break;
22258 case UNORDERED: ccode = "un"; break;
22259 case ORDERED: ccode = "nu"; break;
22260 case UNGE: ccode = "nl"; break;
22261 case UNLE: ccode = "ng"; break;
22262 default:
22263 gcc_unreachable ();
22266 /* Maybe we have a guess as to how likely the branch is. */
22267 pred = "";
22268 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22269 if (note != NULL_RTX)
22271 /* PROB is the difference from 50%. */
22272 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22273 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22275 /* Only hint for highly probable/improbable branches on newer cpus when
22276 we have real profile data, as static prediction overrides processor
22277 dynamic prediction. For older cpus we may as well always hint, but
22278 assume not taken for branches that are very close to 50% as a
22279 mispredicted taken branch is more expensive than a
22280 mispredicted not-taken branch. */
22281 if (rs6000_always_hint
22282 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22283 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22284 && br_prob_note_reliable_p (note)))
22286 if (abs (prob) > REG_BR_PROB_BASE / 20
22287 && ((prob > 0) ^ need_longbranch))
22288 pred = "+";
22289 else
22290 pred = "-";
22294 if (label == NULL)
22295 s += sprintf (s, "b%slr%s ", ccode, pred);
22296 else
22297 s += sprintf (s, "b%s%s ", ccode, pred);
22299 /* We need to escape any '%' characters in the reg_names string.
22300 Assume they'd only be the first character.... */
22301 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22302 *s++ = '%';
22303 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22305 if (label != NULL)
22307 /* If the branch distance was too far, we may have to use an
22308 unconditional branch to go the distance. */
22309 if (need_longbranch)
22310 s += sprintf (s, ",$+8\n\tb %s", label);
22311 else
22312 s += sprintf (s, ",%s", label);
22315 return string;
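/* A few illustrative outputs (condition-register numbers and labels are
   placeholders):

     "beq 0,.L2"		equality branch on cr0, no hint
     "bne+ 6,.L9"		predicted-taken inequality branch on cr6
     "bge 0,$+8\n\tb .L9"	an out-of-range blt, reversed around an
				unconditional branch
     "bnelr 0"			a conditional return (LABEL is NULL)  */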
22318 /* Return insn for VSX or Altivec comparisons. */
22320 static rtx
22321 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22323 rtx mask;
22324 machine_mode mode = GET_MODE (op0);
22326 switch (code)
22328 default:
22329 break;
22331 case GE:
22332 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22333 return NULL_RTX;
22334 /* FALLTHRU */
22336 case EQ:
22337 case GT:
22338 case GTU:
22339 case ORDERED:
22340 case UNORDERED:
22341 case UNEQ:
22342 case LTGT:
22343 mask = gen_reg_rtx (mode);
22344 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22345 return mask;
22348 return NULL_RTX;
22351 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22352 DMODE is expected destination mode. This is a recursive function. */
22354 static rtx
22355 rs6000_emit_vector_compare (enum rtx_code rcode,
22356 rtx op0, rtx op1,
22357 machine_mode dmode)
22359 rtx mask;
22360 bool swap_operands = false;
22361 bool try_again = false;
22363 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22364 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22366 /* See if the comparison works as is. */
22367 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22368 if (mask)
22369 return mask;
22371 switch (rcode)
22373 case LT:
22374 rcode = GT;
22375 swap_operands = true;
22376 try_again = true;
22377 break;
22378 case LTU:
22379 rcode = GTU;
22380 swap_operands = true;
22381 try_again = true;
22382 break;
22383 case NE:
22384 case UNLE:
22385 case UNLT:
22386 case UNGE:
22387 case UNGT:
22388 /* Invert condition and try again.
22389 e.g., A != B becomes ~(A==B). */
22391 enum rtx_code rev_code;
22392 enum insn_code nor_code;
22393 rtx mask2;
22395 rev_code = reverse_condition_maybe_unordered (rcode);
22396 if (rev_code == UNKNOWN)
22397 return NULL_RTX;
22399 nor_code = optab_handler (one_cmpl_optab, dmode);
22400 if (nor_code == CODE_FOR_nothing)
22401 return NULL_RTX;
22403 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22404 if (!mask2)
22405 return NULL_RTX;
22407 mask = gen_reg_rtx (dmode);
22408 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22409 return mask;
22411 break;
22412 case GE:
22413 case GEU:
22414 case LE:
22415 case LEU:
22416 /* Try GT/GTU/LT/LTU OR EQ */
22418 rtx c_rtx, eq_rtx;
22419 enum insn_code ior_code;
22420 enum rtx_code new_code;
22422 switch (rcode)
22424 case GE:
22425 new_code = GT;
22426 break;
22428 case GEU:
22429 new_code = GTU;
22430 break;
22432 case LE:
22433 new_code = LT;
22434 break;
22436 case LEU:
22437 new_code = LTU;
22438 break;
22440 default:
22441 gcc_unreachable ();
22444 ior_code = optab_handler (ior_optab, dmode);
22445 if (ior_code == CODE_FOR_nothing)
22446 return NULL_RTX;
22448 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22449 if (!c_rtx)
22450 return NULL_RTX;
22452 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22453 if (!eq_rtx)
22454 return NULL_RTX;
22456 mask = gen_reg_rtx (dmode);
22457 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22458 return mask;
22460 break;
22461 default:
22462 return NULL_RTX;
22465 if (try_again)
22467 if (swap_operands)
22468 std::swap (op0, op1);
22470 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22471 if (mask)
22472 return mask;
22475 /* You only get two chances. */
22476 return NULL_RTX;
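/* For example, V4SImode LE has no direct AltiVec/VSX instruction, so it
   is built recursively: LE becomes LT IOR EQ, the LT leg is handled by
   swapping the operands of a vcmpgtsw-style GT compare, and the two
   result masks are combined with a vector OR.  */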
22479 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22480 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22481 operands for the relation operation COND. */
22484 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22485 rtx cond, rtx cc_op0, rtx cc_op1)
22487 machine_mode dest_mode = GET_MODE (dest);
22488 machine_mode mask_mode = GET_MODE (cc_op0);
22489 enum rtx_code rcode = GET_CODE (cond);
22490 machine_mode cc_mode = CCmode;
22491 rtx mask;
22492 rtx cond2;
22493 bool invert_move = false;
22495 if (VECTOR_UNIT_NONE_P (dest_mode))
22496 return 0;
22498 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22499 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22501 switch (rcode)
22503 /* Swap operands if we can; otherwise fall back to doing the operation
22504 as specified and using a NOR to invert the test. */
22505 case NE:
22506 case UNLE:
22507 case UNLT:
22508 case UNGE:
22509 case UNGT:
22510 /* Invert condition and try again.
22511 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22512 invert_move = true;
22513 rcode = reverse_condition_maybe_unordered (rcode);
22514 if (rcode == UNKNOWN)
22515 return 0;
22516 break;
22518 case GE:
22519 case LE:
22520 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22522 /* Invert condition to avoid compound test. */
22523 invert_move = true;
22524 rcode = reverse_condition (rcode);
22526 break;
22528 case GTU:
22529 case GEU:
22530 case LTU:
22531 case LEU:
22532 /* Mark unsigned tests with CCUNSmode. */
22533 cc_mode = CCUNSmode;
22535 /* Invert condition to avoid compound test if necessary. */
22536 if (rcode == GEU || rcode == LEU)
22538 invert_move = true;
22539 rcode = reverse_condition (rcode);
22541 break;
22543 default:
22544 break;
22547 /* Get the vector mask for the given relational operations. */
22548 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22550 if (!mask)
22551 return 0;
22553 if (invert_move)
22554 std::swap (op_true, op_false);
22556 /* Optimize vec1 == vec2, since we know the mask generates -1/0. */
22557 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22558 && (GET_CODE (op_true) == CONST_VECTOR
22559 || GET_CODE (op_false) == CONST_VECTOR))
22561 rtx constant_0 = CONST0_RTX (dest_mode);
22562 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22564 if (op_true == constant_m1 && op_false == constant_0)
22566 emit_move_insn (dest, mask);
22567 return 1;
22570 else if (op_true == constant_0 && op_false == constant_m1)
22572 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22573 return 1;
22576 /* If we can't use the vector comparison directly, perhaps we can use
22577 the mask for the true or false fields, instead of loading up a
22578 constant. */
22579 if (op_true == constant_m1)
22580 op_true = mask;
22582 if (op_false == constant_0)
22583 op_false = mask;
22586 if (!REG_P (op_true) && !SUBREG_P (op_true))
22587 op_true = force_reg (dest_mode, op_true);
22589 if (!REG_P (op_false) && !SUBREG_P (op_false))
22590 op_false = force_reg (dest_mode, op_false);
22592 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22593 CONST0_RTX (dest_mode));
22594 emit_insn (gen_rtx_SET (dest,
22595 gen_rtx_IF_THEN_ELSE (dest_mode,
22596 cond2,
22597 op_true,
22598 op_false)));
22599 return 1;
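/* The constant-vector special cases above mean that, e.g.,

	vr = (va == vb) ? (vector int) -1 : (vector int) 0;

   is just the vcmpequw-style compare itself with no select at all, and
   swapping the two constants costs only an extra vector NOT of the
   mask.  */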
22602 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22603 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of the last
22604 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22605 hardware has no such operation. */
22607 static int
22608 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22610 enum rtx_code code = GET_CODE (op);
22611 rtx op0 = XEXP (op, 0);
22612 rtx op1 = XEXP (op, 1);
22613 machine_mode compare_mode = GET_MODE (op0);
22614 machine_mode result_mode = GET_MODE (dest);
22615 bool max_p = false;
22617 if (result_mode != compare_mode)
22618 return 0;
22620 if (code == GE || code == GT)
22621 max_p = true;
22622 else if (code == LE || code == LT)
22623 max_p = false;
22624 else
22625 return 0;
22627 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22630 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22631 max_p = !max_p;
22633 else
22634 return 0;
22636 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22637 return 1;
22640 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22641 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the
22642 operands of the last comparison is nonzero/true, FALSE_COND if it is
22643 zero/false. Return 0 if the hardware has no such operation. */
22645 static int
22646 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22648 enum rtx_code code = GET_CODE (op);
22649 rtx op0 = XEXP (op, 0);
22650 rtx op1 = XEXP (op, 1);
22651 machine_mode result_mode = GET_MODE (dest);
22652 rtx compare_rtx;
22653 rtx cmove_rtx;
22654 rtx clobber_rtx;
22656 if (!can_create_pseudo_p ())
22657 return 0;
22659 switch (code)
22661 case EQ:
22662 case GE:
22663 case GT:
22664 break;
22666 case NE:
22667 case LT:
22668 case LE:
22669 code = swap_condition (code);
22670 std::swap (op0, op1);
22671 break;
22673 default:
22674 return 0;
22677 /* Generate: [(parallel [(set (dest)
22678 (if_then_else (op (cmp1) (cmp2))
22679 (true)
22680 (false)))
22681 (clobber (scratch))])]. */
22683 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22684 cmove_rtx = gen_rtx_SET (dest,
22685 gen_rtx_IF_THEN_ELSE (result_mode,
22686 compare_rtx,
22687 true_cond,
22688 false_cond));
22690 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22691 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22692 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22694 return 1;
22697 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22698 operands of the last comparison is nonzero/true, FALSE_COND if it
22699 is zero/false. Return 0 if the hardware has no such operation. */
22702 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22704 enum rtx_code code = GET_CODE (op);
22705 rtx op0 = XEXP (op, 0);
22706 rtx op1 = XEXP (op, 1);
22707 machine_mode compare_mode = GET_MODE (op0);
22708 machine_mode result_mode = GET_MODE (dest);
22709 rtx temp;
22710 bool is_against_zero;
22712 /* These modes should always match. */
22713 if (GET_MODE (op1) != compare_mode
22714 /* In the isel case however, we can use a compare immediate, so
22715 op1 may be a small constant. */
22716 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22717 return 0;
22718 if (GET_MODE (true_cond) != result_mode)
22719 return 0;
22720 if (GET_MODE (false_cond) != result_mode)
22721 return 0;
22723 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22724 if (TARGET_P9_MINMAX
22725 && (compare_mode == SFmode || compare_mode == DFmode)
22726 && (result_mode == SFmode || result_mode == DFmode))
22728 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22729 return 1;
22731 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22732 return 1;
22735 /* Don't allow using floating point comparisons for integer results for
22736 now. */
22737 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22738 return 0;
22740 /* First, work out if the hardware can do this at all, or
22741 if it's too slow.... */
22742 if (!FLOAT_MODE_P (compare_mode))
22744 if (TARGET_ISEL)
22745 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22746 return 0;
22749 is_against_zero = op1 == CONST0_RTX (compare_mode);
22751 /* A floating-point subtract might overflow, underflow, or produce
22752 an inexact result, thus changing the floating-point flags, so it
22753 can't be generated if we care about that. It's safe if one side
22754 of the construct is zero, since then no subtract will be
22755 generated. */
22756 if (SCALAR_FLOAT_MODE_P (compare_mode)
22757 && flag_trapping_math && ! is_against_zero)
22758 return 0;
22760 /* Eliminate half of the comparisons by switching operands, this
22761 makes the remaining code simpler. */
22762 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22763 || code == LTGT || code == LT || code == UNLE)
22765 code = reverse_condition_maybe_unordered (code);
22766 temp = true_cond;
22767 true_cond = false_cond;
22768 false_cond = temp;
22771 /* UNEQ and LTGT take four instructions for a comparison with zero,
22772 it'll probably be faster to use a branch here too. */
22773 if (code == UNEQ && HONOR_NANS (compare_mode))
22774 return 0;
22776 /* We're going to try to implement comparisons by performing
22777 a subtract, then comparing against zero. Unfortunately,
22778 Inf - Inf is NaN which is not zero, and so if we don't
22779 know that the operand is finite and the comparison
22780 would treat EQ different to UNORDERED, we can't do it. */
22781 if (HONOR_INFINITIES (compare_mode)
22782 && code != GT && code != UNGE
22783 && (!CONST_DOUBLE_P (op1)
22784 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22785 /* Constructs of the form (a OP b ? a : b) are safe. */
22786 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22787 || (! rtx_equal_p (op0, true_cond)
22788 && ! rtx_equal_p (op1, true_cond))))
22789 return 0;
22791 /* At this point we know we can use fsel. */
22793 /* Reduce the comparison to a comparison against zero. */
22794 if (! is_against_zero)
22796 temp = gen_reg_rtx (compare_mode);
22797 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22798 op0 = temp;
22799 op1 = CONST0_RTX (compare_mode);
22802 /* If we don't care about NaNs we can reduce some of the comparisons
22803 down to faster ones. */
22804 if (! HONOR_NANS (compare_mode))
22805 switch (code)
22807 case GT:
22808 code = LE;
22809 temp = true_cond;
22810 true_cond = false_cond;
22811 false_cond = temp;
22812 break;
22813 case UNGE:
22814 code = GE;
22815 break;
22816 case UNEQ:
22817 code = EQ;
22818 break;
22819 default:
22820 break;
22823 /* Now, reduce everything down to a GE. */
22824 switch (code)
22826 case GE:
22827 break;
22829 case LE:
22830 temp = gen_reg_rtx (compare_mode);
22831 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22832 op0 = temp;
22833 break;
22835 case ORDERED:
22836 temp = gen_reg_rtx (compare_mode);
22837 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22838 op0 = temp;
22839 break;
22841 case EQ:
22842 temp = gen_reg_rtx (compare_mode);
22843 emit_insn (gen_rtx_SET (temp,
22844 gen_rtx_NEG (compare_mode,
22845 gen_rtx_ABS (compare_mode, op0))));
22846 op0 = temp;
22847 break;
22849 case UNGE:
22850 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22851 temp = gen_reg_rtx (result_mode);
22852 emit_insn (gen_rtx_SET (temp,
22853 gen_rtx_IF_THEN_ELSE (result_mode,
22854 gen_rtx_GE (VOIDmode,
22855 op0, op1),
22856 true_cond, false_cond)));
22857 false_cond = true_cond;
22858 true_cond = temp;
22860 temp = gen_reg_rtx (compare_mode);
22861 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22862 op0 = temp;
22863 break;
22865 case GT:
22866 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22867 temp = gen_reg_rtx (result_mode);
22868 emit_insn (gen_rtx_SET (temp,
22869 gen_rtx_IF_THEN_ELSE (result_mode,
22870 gen_rtx_GE (VOIDmode,
22871 op0, op1),
22872 true_cond, false_cond)));
22873 true_cond = false_cond;
22874 false_cond = temp;
22876 temp = gen_reg_rtx (compare_mode);
22877 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22878 op0 = temp;
22879 break;
22881 default:
22882 gcc_unreachable ();
22885 emit_insn (gen_rtx_SET (dest,
22886 gen_rtx_IF_THEN_ELSE (result_mode,
22887 gen_rtx_GE (VOIDmode,
22888 op0, op1),
22889 true_cond, false_cond)));
22890 return 1;
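/* A sketch of the fsel reduction above, assuming NaNs are not honored:

	double r = (a <= b) ? x : y;

   is first rewritten as a compare against zero (t = a - b), the LE is
   then turned into a GE by negating t, and the final if_then_else maps
   onto a single fsel, roughly "fsub t,a,b; fneg t,t; fsel r,t,x,y".  */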
22893 /* Same as above, but for ints (isel). */
22896 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22898 rtx condition_rtx, cr;
22899 machine_mode mode = GET_MODE (dest);
22900 enum rtx_code cond_code;
22901 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22902 bool signedp;
22904 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22905 return 0;
22907 /* We still have to do the compare, because isel doesn't do a
22908 compare; it just looks at the CRx bits set by a previous compare
22909 instruction. */
22910 condition_rtx = rs6000_generate_compare (op, mode);
22911 cond_code = GET_CODE (condition_rtx);
22912 cr = XEXP (condition_rtx, 0);
22913 signedp = GET_MODE (cr) == CCmode;
22915 isel_func = (mode == SImode
22916 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22917 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22919 switch (cond_code)
22921 case LT: case GT: case LTU: case GTU: case EQ:
22922 /* isel handles these directly. */
22923 break;
22925 default:
22926 /* We need to swap the sense of the comparison. */
22928 std::swap (false_cond, true_cond);
22929 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22931 break;
22934 false_cond = force_reg (mode, false_cond);
22935 if (true_cond != const0_rtx)
22936 true_cond = force_reg (mode, true_cond);
22938 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22940 return 1;
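/* For example, with -misel,

	int r = (a < b) ? x : y;

   becomes a cmpw followed by one isel selecting on the LT bit, with no
   branch.  FALSE_COND is always forced into a register since isel has
   no immediate form; const0_rtx is allowed for TRUE_COND because isel
   can encode r0 as a zero source.  */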
22943 void
22944 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22946 machine_mode mode = GET_MODE (op0);
22947 enum rtx_code c;
22948 rtx target;
22950 /* VSX/altivec have direct min/max insns. */
22951 if ((code == SMAX || code == SMIN)
22952 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22953 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22955 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22956 return;
22959 if (code == SMAX || code == SMIN)
22960 c = GE;
22961 else
22962 c = GEU;
22964 if (code == SMAX || code == UMAX)
22965 target = emit_conditional_move (dest, c, op0, op1, mode,
22966 op0, op1, mode, 0);
22967 else
22968 target = emit_conditional_move (dest, c, op0, op1, mode,
22969 op1, op0, mode, 0);
22970 gcc_assert (target);
22971 if (target != dest)
22972 emit_move_insn (dest, target);
22975 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22976 COND is true. Mark the jump as unlikely to be taken. */
22978 static void
22979 emit_unlikely_jump (rtx cond, rtx label)
22981 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22982 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22983 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22986 /* A subroutine of the atomic operation splitters. Emit a load-locked
22987 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22988 the zero_extend operation. */
22990 static void
22991 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22993 rtx (*fn) (rtx, rtx) = NULL;
22995 switch (mode)
22997 case E_QImode:
22998 fn = gen_load_lockedqi;
22999 break;
23000 case E_HImode:
23001 fn = gen_load_lockedhi;
23002 break;
23003 case E_SImode:
23004 if (GET_MODE (mem) == QImode)
23005 fn = gen_load_lockedqi_si;
23006 else if (GET_MODE (mem) == HImode)
23007 fn = gen_load_lockedhi_si;
23008 else
23009 fn = gen_load_lockedsi;
23010 break;
23011 case E_DImode:
23012 fn = gen_load_lockeddi;
23013 break;
23014 case E_TImode:
23015 fn = gen_load_lockedti;
23016 break;
23017 default:
23018 gcc_unreachable ();
23020 emit_insn (fn (reg, mem));
23023 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23024 instruction in MODE. */
23026 static void
23027 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23029 rtx (*fn) (rtx, rtx, rtx) = NULL;
23031 switch (mode)
23033 case E_QImode:
23034 fn = gen_store_conditionalqi;
23035 break;
23036 case E_HImode:
23037 fn = gen_store_conditionalhi;
23038 break;
23039 case E_SImode:
23040 fn = gen_store_conditionalsi;
23041 break;
23042 case E_DImode:
23043 fn = gen_store_conditionaldi;
23044 break;
23045 case E_TImode:
23046 fn = gen_store_conditionalti;
23047 break;
23048 default:
23049 gcc_unreachable ();
23052 /* Emit sync before stwcx. to address PPC405 Erratum 77. */
23053 if (PPC405_ERRATUM77)
23054 emit_insn (gen_hwsync ());
23056 emit_insn (fn (res, mem, val));
23059 /* Expand barriers before and after a load_locked/store_cond sequence. */
23061 static rtx
23062 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23064 rtx addr = XEXP (mem, 0);
23066 if (!legitimate_indirect_address_p (addr, reload_completed)
23067 && !legitimate_indexed_address_p (addr, reload_completed))
23069 addr = force_reg (Pmode, addr);
23070 mem = replace_equiv_address_nv (mem, addr);
23073 switch (model)
23075 case MEMMODEL_RELAXED:
23076 case MEMMODEL_CONSUME:
23077 case MEMMODEL_ACQUIRE:
23078 break;
23079 case MEMMODEL_RELEASE:
23080 case MEMMODEL_ACQ_REL:
23081 emit_insn (gen_lwsync ());
23082 break;
23083 case MEMMODEL_SEQ_CST:
23084 emit_insn (gen_hwsync ());
23085 break;
23086 default:
23087 gcc_unreachable ();
23089 return mem;
23092 static void
23093 rs6000_post_atomic_barrier (enum memmodel model)
23095 switch (model)
23097 case MEMMODEL_RELAXED:
23098 case MEMMODEL_CONSUME:
23099 case MEMMODEL_RELEASE:
23100 break;
23101 case MEMMODEL_ACQUIRE:
23102 case MEMMODEL_ACQ_REL:
23103 case MEMMODEL_SEQ_CST:
23104 emit_insn (gen_isync ());
23105 break;
23106 default:
23107 gcc_unreachable ();
23111 /* A subroutine of the various atomic expanders. For sub-word operations,
23112 we must adjust things to operate on SImode. Given the original MEM,
23113 return a new aligned memory. Also build and return the quantities by
23114 which to shift and mask. */
23116 static rtx
23117 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23119 rtx addr, align, shift, mask, mem;
23120 HOST_WIDE_INT shift_mask;
23121 machine_mode mode = GET_MODE (orig_mem);
23123 /* For smaller modes, we have to implement this via SImode. */
23124 shift_mask = (mode == QImode ? 0x18 : 0x10);
23126 addr = XEXP (orig_mem, 0);
23127 addr = force_reg (GET_MODE (addr), addr);
23129 /* Aligned memory containing subword. Generate a new memory. We
23130 do not want any of the existing MEM_ATTR data, as we're now
23131 accessing memory outside the original object. */
23132 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23133 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23134 mem = gen_rtx_MEM (SImode, align);
23135 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23136 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23137 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23139 /* Shift amount for subword relative to aligned word. */
23140 shift = gen_reg_rtx (SImode);
23141 addr = gen_lowpart (SImode, addr);
23142 rtx tmp = gen_reg_rtx (SImode);
23143 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23144 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23145 if (BYTES_BIG_ENDIAN)
23146 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23147 shift, 1, OPTAB_LIB_WIDEN);
23148 *pshift = shift;
23150 /* Mask for insertion. */
23151 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23152 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23153 *pmask = mask;
23155 return mem;
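/* A worked example of the arithmetic above: for a QImode access whose
   address has low bits 0b10, the containing SImode word is at addr & -4
   and shift = (addr << 3) & 0x18 = 16, i.e. on little-endian the byte
   occupies bits 16..23 of the word; on big-endian the XOR with 0x18
   corrects this to shift = 8.  The insertion mask is 0xff << shift.  */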
23158 /* A subroutine of the various atomic expanders. For sub-word operands,
23159 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23161 static rtx
23162 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23164 rtx x;
23166 x = gen_reg_rtx (SImode);
23167 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23168 gen_rtx_NOT (SImode, mask),
23169 oldval)));
23171 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23173 return x;
23176 /* A subroutine of the various atomic expanders. For sub-word operands,
23177 extract WIDE to NARROW via SHIFT. */
23179 static void
23180 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23182 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23183 wide, 1, OPTAB_LIB_WIDEN);
23184 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23187 /* Expand an atomic compare and swap operation. */
23189 void
23190 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23192 rtx boolval, retval, mem, oldval, newval, cond;
23193 rtx label1, label2, x, mask, shift;
23194 machine_mode mode, orig_mode;
23195 enum memmodel mod_s, mod_f;
23196 bool is_weak;
23198 boolval = operands[0];
23199 retval = operands[1];
23200 mem = operands[2];
23201 oldval = operands[3];
23202 newval = operands[4];
23203 is_weak = (INTVAL (operands[5]) != 0);
23204 mod_s = memmodel_base (INTVAL (operands[6]));
23205 mod_f = memmodel_base (INTVAL (operands[7]));
23206 orig_mode = mode = GET_MODE (mem);
23208 mask = shift = NULL_RTX;
23209 if (mode == QImode || mode == HImode)
23211 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23212 lwarx and shift/mask operations. With power8, we need to do the
23213 comparison in SImode, but the store is still done in QI/HImode. */
23214 oldval = convert_modes (SImode, mode, oldval, 1);
23216 if (!TARGET_SYNC_HI_QI)
23218 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23220 /* Shift and mask OLDVAL into position within the word. */
23221 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23222 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23224 /* Shift and mask NEWVAL into position within the word. */
23225 newval = convert_modes (SImode, mode, newval, 1);
23226 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23227 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23230 /* Prepare to adjust the return value. */
23231 retval = gen_reg_rtx (SImode);
23232 mode = SImode;
23234 else if (reg_overlap_mentioned_p (retval, oldval))
23235 oldval = copy_to_reg (oldval);
23237 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23238 oldval = copy_to_mode_reg (mode, oldval);
23240 if (reg_overlap_mentioned_p (retval, newval))
23241 newval = copy_to_reg (newval);
23243 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23245 label1 = NULL_RTX;
23246 if (!is_weak)
23248 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23249 emit_label (XEXP (label1, 0));
23251 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23253 emit_load_locked (mode, retval, mem);
23255 x = retval;
23256 if (mask)
23257 x = expand_simple_binop (SImode, AND, retval, mask,
23258 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23260 cond = gen_reg_rtx (CCmode);
23261 /* If we have TImode, synthesize a comparison. */
23262 if (mode != TImode)
23263 x = gen_rtx_COMPARE (CCmode, x, oldval);
23264 else
23266 rtx xor1_result = gen_reg_rtx (DImode);
23267 rtx xor2_result = gen_reg_rtx (DImode);
23268 rtx or_result = gen_reg_rtx (DImode);
23269 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23270 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23271 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23272 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23274 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23275 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23276 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23277 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23280 emit_insn (gen_rtx_SET (cond, x));
23282 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23283 emit_unlikely_jump (x, label2);
23285 x = newval;
23286 if (mask)
23287 x = rs6000_mask_atomic_subword (retval, newval, mask);
23289 emit_store_conditional (orig_mode, cond, mem, x);
23291 if (!is_weak)
23293 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23294 emit_unlikely_jump (x, label1);
23297 if (!is_mm_relaxed (mod_f))
23298 emit_label (XEXP (label2, 0));
23300 rs6000_post_atomic_barrier (mod_s);
23302 if (is_mm_relaxed (mod_f))
23303 emit_label (XEXP (label2, 0));
23305 if (shift)
23306 rs6000_finish_atomic_subword (operands[1], retval, shift);
23307 else if (mode != GET_MODE (operands[1]))
23308 convert_move (operands[1], retval, 1);
23310 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23311 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23312 emit_insn (gen_rtx_SET (boolval, x));
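/* For reference, the core of a strong SImode compare-and-swap expanded
   above is a loop of roughly this shape (memory-model barriers are
   placed around it as described):

	.L1:	lwarx  r,0,mem
		cmpw   0,r,oldval
		bne-   0,.L2
		stwcx. newval,0,mem
		bne-   0,.L1
	.L2:

   leaving CR0 holding EQ on success and NE on failure.  */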
23315 /* Expand an atomic exchange operation. */
23317 void
23318 rs6000_expand_atomic_exchange (rtx operands[])
23320 rtx retval, mem, val, cond;
23321 machine_mode mode;
23322 enum memmodel model;
23323 rtx label, x, mask, shift;
23325 retval = operands[0];
23326 mem = operands[1];
23327 val = operands[2];
23328 model = memmodel_base (INTVAL (operands[3]));
23329 mode = GET_MODE (mem);
23331 mask = shift = NULL_RTX;
23332 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23334 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23336 /* Shift and mask VAL into position within the word. */
23337 val = convert_modes (SImode, mode, val, 1);
23338 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23339 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23341 /* Prepare to adjust the return value. */
23342 retval = gen_reg_rtx (SImode);
23343 mode = SImode;
23346 mem = rs6000_pre_atomic_barrier (mem, model);
23348 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23349 emit_label (XEXP (label, 0));
23351 emit_load_locked (mode, retval, mem);
23353 x = val;
23354 if (mask)
23355 x = rs6000_mask_atomic_subword (retval, val, mask);
23357 cond = gen_reg_rtx (CCmode);
23358 emit_store_conditional (mode, cond, mem, x);
23360 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23361 emit_unlikely_jump (x, label);
23363 rs6000_post_atomic_barrier (model);
23365 if (shift)
23366 rs6000_finish_atomic_subword (operands[0], retval, shift);
23369 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23370 to perform. MEM is the memory on which to operate. VAL is the second
23371 operand of the binary operator. BEFORE and AFTER are optional locations to
23372 return the value of MEM either before or after the operation. MODEL_RTX
23373 is a CONST_INT containing the memory model to use. */
23375 void
23376 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23377 rtx orig_before, rtx orig_after, rtx model_rtx)
23379 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23380 machine_mode mode = GET_MODE (mem);
23381 machine_mode store_mode = mode;
23382 rtx label, x, cond, mask, shift;
23383 rtx before = orig_before, after = orig_after;
23385 mask = shift = NULL_RTX;
23386 /* On power8, we want to use SImode for the operation. On previous systems,
23387 use the operation in a subword and shift/mask to get the proper byte or
23388 halfword. */
23389 if (mode == QImode || mode == HImode)
23391 if (TARGET_SYNC_HI_QI)
23393 val = convert_modes (SImode, mode, val, 1);
23395 /* Prepare to adjust the return value. */
23396 before = gen_reg_rtx (SImode);
23397 if (after)
23398 after = gen_reg_rtx (SImode);
23399 mode = SImode;
23401 else
23403 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23405 /* Shift and mask VAL into position within the word. */
23406 val = convert_modes (SImode, mode, val, 1);
23407 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23408 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23410 switch (code)
23412 case IOR:
23413 case XOR:
23414 /* We've already zero-extended VAL. That is sufficient to
23415 make certain that it does not affect other bits. */
23416 mask = NULL;
23417 break;
23419 case AND:
23420 /* If we make certain that all of the other bits in VAL are
23421 set, that will be sufficient to not affect other bits. */
23422 x = gen_rtx_NOT (SImode, mask);
23423 x = gen_rtx_IOR (SImode, x, val);
23424 emit_insn (gen_rtx_SET (val, x));
23425 mask = NULL;
23426 break;
23428 case NOT:
23429 case PLUS:
23430 case MINUS:
23431 /* These will all affect bits outside the field and need
23432 adjustment via MASK within the loop. */
23433 break;
23435 default:
23436 gcc_unreachable ();
23439 /* Prepare to adjust the return value. */
23440 before = gen_reg_rtx (SImode);
23441 if (after)
23442 after = gen_reg_rtx (SImode);
23443 store_mode = mode = SImode;
23447 mem = rs6000_pre_atomic_barrier (mem, model);
23449 label = gen_label_rtx ();
23450 emit_label (label);
23451 label = gen_rtx_LABEL_REF (VOIDmode, label);
23453 if (before == NULL_RTX)
23454 before = gen_reg_rtx (mode);
23456 emit_load_locked (mode, before, mem);
23458 if (code == NOT)
23460 x = expand_simple_binop (mode, AND, before, val,
23461 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23462 after = expand_simple_unop (mode, NOT, x, after, 1);
23464 else
23466 after = expand_simple_binop (mode, code, before, val,
23467 after, 1, OPTAB_LIB_WIDEN);
23470 x = after;
23471 if (mask)
23473 x = expand_simple_binop (SImode, AND, after, mask,
23474 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23475 x = rs6000_mask_atomic_subword (before, x, mask);
23477 else if (store_mode != mode)
23478 x = convert_modes (store_mode, mode, x, 1);
23480 cond = gen_reg_rtx (CCmode);
23481 emit_store_conditional (store_mode, cond, mem, x);
23483 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23484 emit_unlikely_jump (x, label);
23486 rs6000_post_atomic_barrier (model);
23488 if (shift)
23490 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23491 then do the calculations in a SImode register. */
23492 if (orig_before)
23493 rs6000_finish_atomic_subword (orig_before, before, shift);
23494 if (orig_after)
23495 rs6000_finish_atomic_subword (orig_after, after, shift);
23497 else if (store_mode != mode)
23499 /* QImode/HImode on machines with lbarx/lharx where we do the native
23500 operation and then do the calculations in a SImode register. */
23501 if (orig_before)
23502 convert_move (orig_before, before, 1);
23503 if (orig_after)
23504 convert_move (orig_after, after, 1);
23506 else if (orig_after && after != orig_after)
23507 emit_move_insn (orig_after, after);
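/* As an illustrative sketch only (register names rBEFORE/rAFTER/rVAL/rMEM
   are placeholders), the loop generated above for a word-sized
   fetch-and-add under a seq-cst model is roughly:

	sync
   1:	lwarx  rBEFORE,0,rMEM
	add    rAFTER,rBEFORE,rVAL
	stwcx. rAFTER,0,rMEM
	bne-   1b
	isync

   The barriers actually emitted depend on MODEL_RTX, and subword
   operands additionally go through the shift/mask adjustment above.  */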
23510 /* Emit instructions to move SRC to DST. Called by splitters for
23511 multi-register moves. It will emit at most one instruction for
23512 each register that is accessed; that is, it won't emit li/lis pairs
23513 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23514 register. */
23516 void
23517 rs6000_split_multireg_move (rtx dst, rtx src)
23519 /* The register number of the first register being moved. */
23520 int reg;
23521 /* The mode that is to be moved. */
23522 machine_mode mode;
23523 /* The mode that the move is being done in, and its size. */
23524 machine_mode reg_mode;
23525 int reg_mode_size;
23526 /* The number of registers that will be moved. */
23527 int nregs;
23529 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23530 mode = GET_MODE (dst);
23531 nregs = hard_regno_nregs (reg, mode);
23532 if (FP_REGNO_P (reg))
23533 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23534 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23535 else if (ALTIVEC_REGNO_P (reg))
23536 reg_mode = V16QImode;
23537 else
23538 reg_mode = word_mode;
23539 reg_mode_size = GET_MODE_SIZE (reg_mode);
23541 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23543 /* TDmode residing in FP registers is special, since the ISA requires that
23544 the lower-numbered word of a register pair is always the most significant
23545 word, even in little-endian mode. This does not match the usual subreg
23546 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23547 the appropriate constituent registers "by hand" in little-endian mode.
23549 Note we do not need to check for destructive overlap here since TDmode
23550 can only reside in even/odd register pairs. */
23551 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23553 rtx p_src, p_dst;
23554 int i;
23556 for (i = 0; i < nregs; i++)
23558 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23559 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23560 else
23561 p_src = simplify_gen_subreg (reg_mode, src, mode,
23562 i * reg_mode_size);
23564 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23565 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23566 else
23567 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23568 i * reg_mode_size);
23570 emit_insn (gen_rtx_SET (p_dst, p_src));
23573 return;
23576 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23578 /* Move register range backwards, if we might have destructive
23579 overlap. */
23580 int i;
23581 for (i = nregs - 1; i >= 0; i--)
23582 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23583 i * reg_mode_size),
23584 simplify_gen_subreg (reg_mode, src, mode,
23585 i * reg_mode_size)));
23587 else
23589 int i;
23590 int j = -1;
23591 bool used_update = false;
23592 rtx restore_basereg = NULL_RTX;
23594 if (MEM_P (src) && INT_REGNO_P (reg))
23596 rtx breg;
23598 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23599 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23601 rtx delta_rtx;
23602 breg = XEXP (XEXP (src, 0), 0);
23603 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23604 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23605 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23606 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23607 src = replace_equiv_address (src, breg);
23609 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23611 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23613 rtx basereg = XEXP (XEXP (src, 0), 0);
23614 if (TARGET_UPDATE)
23616 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23617 emit_insn (gen_rtx_SET (ndst,
23618 gen_rtx_MEM (reg_mode,
23619 XEXP (src, 0))));
23620 used_update = true;
23622 else
23623 emit_insn (gen_rtx_SET (basereg,
23624 XEXP (XEXP (src, 0), 1)));
23625 src = replace_equiv_address (src, basereg);
23627 else
23629 rtx basereg = gen_rtx_REG (Pmode, reg);
23630 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23631 src = replace_equiv_address (src, basereg);
23635 breg = XEXP (src, 0);
23636 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23637 breg = XEXP (breg, 0);
23639 /* If the base register we are using to address memory is
23640 also a destination reg, then change that register last. */
23641 if (REG_P (breg)
23642 && REGNO (breg) >= REGNO (dst)
23643 && REGNO (breg) < REGNO (dst) + nregs)
23644 j = REGNO (breg) - REGNO (dst);
23646 else if (MEM_P (dst) && INT_REGNO_P (reg))
23648 rtx breg;
23650 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23651 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23653 rtx delta_rtx;
23654 breg = XEXP (XEXP (dst, 0), 0);
23655 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23656 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23657 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23659 /* We have to update the breg before doing the store.
23660 Use store with update, if available. */
23662 if (TARGET_UPDATE)
23664 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23665 emit_insn (TARGET_32BIT
23666 ? (TARGET_POWERPC64
23667 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23668 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
23669 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23670 used_update = true;
23672 else
23673 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23674 dst = replace_equiv_address (dst, breg);
23676 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23677 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23679 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23681 rtx basereg = XEXP (XEXP (dst, 0), 0);
23682 if (TARGET_UPDATE)
23684 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23685 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23686 XEXP (dst, 0)),
23687 nsrc));
23688 used_update = true;
23690 else
23691 emit_insn (gen_rtx_SET (basereg,
23692 XEXP (XEXP (dst, 0), 1)));
23693 dst = replace_equiv_address (dst, basereg);
23695 else
23697 rtx basereg = XEXP (XEXP (dst, 0), 0);
23698 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23699 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23700 && REG_P (basereg)
23701 && REG_P (offsetreg)
23702 && REGNO (basereg) != REGNO (offsetreg));
23703 if (REGNO (basereg) == 0)
23705 rtx tmp = offsetreg;
23706 offsetreg = basereg;
23707 basereg = tmp;
23709 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23710 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23711 dst = replace_equiv_address (dst, basereg);
23714 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23715 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23718 for (i = 0; i < nregs; i++)
23720 /* Calculate index to next subword. */
23721 ++j;
23722 if (j == nregs)
23723 j = 0;
23725 /* If the compiler already emitted the move of the first word by
23726 store with update, there is no need to do anything. */
23727 if (j == 0 && used_update)
23728 continue;
23730 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23731 j * reg_mode_size),
23732 simplify_gen_subreg (reg_mode, src, mode,
23733 j * reg_mode_size)));
23735 if (restore_basereg != NULL_RTX)
23736 emit_insn (restore_basereg);
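/* For example (an illustrative sketch), splitting a TImode move from
   r4..r5 into r3..r4 on a 64-bit target takes the forward loop above,
   emitting

	mr 3,4
	mr 4,5

   since REGNO (src) > REGNO (dst); when the destination registers are
   the higher-numbered ones, the backward loop avoids destructive
   overlap.  */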
23740 static GTY(()) alias_set_type TOC_alias_set = -1;
23742 alias_set_type
23743 get_TOC_alias_set (void)
23745 if (TOC_alias_set == -1)
23746 TOC_alias_set = new_alias_set ();
23747 return TOC_alias_set;
23750 /* Return the internal arg pointer used for function incoming
23751 arguments. When -fsplit-stack, the arg pointer is r12 so we need
23752 to copy it to a pseudo in order for it to be preserved over calls
23753 and suchlike. We'd really like to use a pseudo here for the
23754 internal arg pointer but data-flow analysis is not prepared to
23755 accept pseudos as live at the beginning of a function. */
23757 static rtx
23758 rs6000_internal_arg_pointer (void)
23760 if (flag_split_stack
23761 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
23762 == NULL))
23765 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
23767 rtx pat;
23769 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
23770 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
23772 /* Put the pseudo initialization right after the note at the
23773 beginning of the function. */
23774 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
23775 gen_rtx_REG (Pmode, 12));
23776 push_topmost_sequence ();
23777 emit_insn_after (pat, get_insns ());
23778 pop_topmost_sequence ();
23780 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
23781 FIRST_PARM_OFFSET (current_function_decl));
23782 return copy_to_reg (ret);
23784 return virtual_incoming_args_rtx;
23787 /* We may have to tell the dataflow pass that the split stack prologue
23788 is initializing a register. */
23790 static void
23791 rs6000_live_on_entry (bitmap regs)
23793 if (flag_split_stack)
23794 bitmap_set_bit (regs, 12);
23798 /* A C compound statement that outputs the assembler code for a thunk
23799 function, used to implement C++ virtual function calls with
23800 multiple inheritance. The thunk acts as a wrapper around a virtual
23801 function, adjusting the implicit object parameter before handing
23802 control off to the real function.
23804 First, emit code to add the integer DELTA to the location that
23805 contains the incoming first argument. Assume that this argument
23806 contains a pointer, and is the one used to pass the `this' pointer
23807 in C++. This is the incoming argument *before* the function
23808 prologue, e.g. `%o0' on a sparc. The addition must preserve the
23809 values of all other incoming arguments.
23811 After the addition, emit code to jump to FUNCTION, which is a
23812 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
23813 not touch the return address. Hence returning from FUNCTION will
23814 return to whoever called the current `thunk'.
23816 The effect must be as if FUNCTION had been called directly with the
23817 adjusted first argument. This macro is responsible for emitting
23818 all of the code for a thunk function; output_function_prologue()
23819 and output_function_epilogue() are not invoked.
23821 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
23822 been extracted from it.) It might possibly be useful on some
23823 targets, but probably not.
23825 If you do not define this macro, the target-independent code in the
23826 C++ frontend will generate a less efficient heavyweight thunk that
23827 calls FUNCTION instead of jumping to it. The generic approach does
23828 not support varargs. */
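/* For instance (an illustrative sketch, not the literal output), a thunk
   with DELTA = 16 and no vcall offset on a 64-bit ELFv2 target reduces
   to:

	addi 3,3,16
	b function

   i.e. adjust the incoming `this' pointer in r3 and tail-jump to the
   target.  */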
23830 static void
23831 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
23832 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
23833 tree function)
23835 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
23836 rtx this_rtx, funexp;
23837 rtx_insn *insn;
23839 reload_completed = 1;
23840 epilogue_completed = 1;
23842 /* Mark the end of the (empty) prologue. */
23843 emit_note (NOTE_INSN_PROLOGUE_END);
23845 /* Find the "this" pointer. If the function returns a structure,
23846 the structure return pointer is in r3. */
23847 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
23848 this_rtx = gen_rtx_REG (Pmode, 4);
23849 else
23850 this_rtx = gen_rtx_REG (Pmode, 3);
23852 /* Apply the constant offset, if required. */
23853 if (delta)
23854 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
23856 /* Apply the offset from the vtable, if required. */
23857 if (vcall_offset)
23859 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
23860 rtx tmp = gen_rtx_REG (Pmode, 12);
23862 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
23863 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
23865 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
23866 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
23868 else
23870 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
23872 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
23874 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
23877 /* Generate a tail call to the target function. */
23878 if (!TREE_USED (function))
23880 assemble_external (function);
23881 TREE_USED (function) = 1;
23883 funexp = XEXP (DECL_RTL (function), 0);
23884 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
23886 #if TARGET_MACHO
23887 if (MACHOPIC_INDIRECT)
23888 funexp = machopic_indirect_call_target (funexp);
23889 #endif
23891 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
23892 generate sibcall RTL explicitly. */
23893 insn = emit_call_insn (
23894 gen_rtx_PARALLEL (VOIDmode,
23895 gen_rtvec (3,
23896 gen_rtx_CALL (VOIDmode,
23897 funexp, const0_rtx),
23898 gen_rtx_USE (VOIDmode, const0_rtx),
23899 simple_return_rtx)));
23900 SIBLING_CALL_P (insn) = 1;
23901 emit_barrier ();
23903 /* Run just enough of rest_of_compilation to get the insns emitted.
23904 There's not really enough bulk here to make other passes such as
23905 instruction scheduling worthwhile. */
23906 insn = get_insns ();
23907 shorten_branches (insn);
23908 assemble_start_function (thunk_fndecl, fnname);
23909 final_start_function (insn, file, 1);
23910 final (insn, file, 1);
23911 final_end_function ();
23912 assemble_end_function (thunk_fndecl, fnname);
23914 reload_completed = 0;
23915 epilogue_completed = 0;
23918 /* A quick summary of the various types of 'constant-pool tables'
23919 under PowerPC:
23921 Target Flags Name One table per
23922 AIX (none) AIX TOC object file
23923 AIX -mfull-toc AIX TOC object file
23924 AIX -mminimal-toc AIX minimal TOC translation unit
23925 SVR4/EABI (none) SVR4 SDATA object file
23926 SVR4/EABI -fpic SVR4 pic object file
23927 SVR4/EABI -fPIC SVR4 PIC translation unit
23928 SVR4/EABI -mrelocatable EABI TOC function
23929 SVR4/EABI -maix AIX TOC object file
23930 SVR4/EABI -maix -mminimal-toc
23931 AIX minimal TOC translation unit
23933 Name Reg. Set by entries contains:
23934 made by addrs? fp? sum?
23936 AIX TOC 2 crt0 as Y option option
23937 AIX minimal TOC 30 prolog gcc Y Y option
23938 SVR4 SDATA 13 crt0 gcc N Y N
23939 SVR4 pic 30 prolog ld Y not yet N
23940 SVR4 PIC 30 prolog gcc Y option option
23941 EABI TOC 30 prolog gcc Y option option
23945 /* Hash functions for the hash table. */
23947 static unsigned
23948 rs6000_hash_constant (rtx k)
23950 enum rtx_code code = GET_CODE (k);
23951 machine_mode mode = GET_MODE (k);
23952 unsigned result = (code << 3) ^ mode;
23953 const char *format;
23954 int flen, fidx;
23956 format = GET_RTX_FORMAT (code);
23957 flen = strlen (format);
23958 fidx = 0;
23960 switch (code)
23962 case LABEL_REF:
23963 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
23965 case CONST_WIDE_INT:
23967 int i;
23968 flen = CONST_WIDE_INT_NUNITS (k);
23969 for (i = 0; i < flen; i++)
23970 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
23971 return result;
23974 case CONST_DOUBLE:
23975 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
23977 case CODE_LABEL:
23978 fidx = 3;
23979 break;
23981 default:
23982 break;
23985 for (; fidx < flen; fidx++)
23986 switch (format[fidx])
23988 case 's':
23990 unsigned i, len;
23991 const char *str = XSTR (k, fidx);
23992 len = strlen (str);
23993 result = result * 613 + len;
23994 for (i = 0; i < len; i++)
23995 result = result * 613 + (unsigned) str[i];
23996 break;
23998 case 'u':
23999 case 'e':
24000 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
24001 break;
24002 case 'i':
24003 case 'n':
24004 result = result * 613 + (unsigned) XINT (k, fidx);
24005 break;
24006 case 'w':
24007 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
24008 result = result * 613 + (unsigned) XWINT (k, fidx);
24009 else
24011 size_t i;
24012 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
24013 result = result * 613 + (unsigned) (XWINT (k, fidx)
24014 >> CHAR_BIT * i);
24016 break;
24017 case '0':
24018 break;
24019 default:
24020 gcc_unreachable ();
24023 return result;
24026 hashval_t
24027 toc_hasher::hash (toc_hash_struct *thc)
24029 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
24032 /* Compare H1 and H2 for equivalence. */
24034 bool
24035 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
24037 rtx r1 = h1->key;
24038 rtx r2 = h2->key;
24040 if (h1->key_mode != h2->key_mode)
24041 return 0;
24043 return rtx_equal_p (r1, r2);
24046 /* These are the names given by the C++ front-end to vtables, and
24047 vtable-like objects. Ideally, this logic should not be here;
24048 instead, there should be some programmatic way of inquiring as
24049 to whether or not an object is a vtable. */
24051 #define VTABLE_NAME_P(NAME) \
24052 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
24053 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
24054 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
24055 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
24056 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
24058 #ifdef NO_DOLLAR_IN_LABEL
24059 /* Return a GGC-allocated character string translating dollar signs in
24060 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
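/* For example, "foo$bar$baz" becomes "foo_bar_baz"; a name containing no
   '$', or one whose first character is '$', is returned unchanged.  */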
24062 const char *
24063 rs6000_xcoff_strip_dollar (const char *name)
24065 char *strip, *p;
24066 const char *q;
24067 size_t len;
24069 q = (const char *) strchr (name, '$');
24071 if (q == 0 || q == name)
24072 return name;
24074 len = strlen (name);
24075 strip = XALLOCAVEC (char, len + 1);
24076 strcpy (strip, name);
24077 p = strip + (q - name);
24078 while (p)
24080 *p = '_';
24081 p = strchr (p + 1, '$');
24084 return ggc_alloc_string (strip, len);
24086 #endif
24088 void
24089 rs6000_output_symbol_ref (FILE *file, rtx x)
24091 const char *name = XSTR (x, 0);
24093 /* Currently C++ TOC references to vtables can be emitted before it
24094 is decided whether the vtable is public or private. If this is
24095 the case, then the linker will eventually complain that there is
24096 a reference to an unknown section. Thus, for vtables only,
24097 we emit the TOC reference to reference the identifier and not the
24098 symbol. */
24099 if (VTABLE_NAME_P (name))
24101 RS6000_OUTPUT_BASENAME (file, name);
24103 else
24104 assemble_name (file, name);
24107 /* Output a TOC entry. We derive the entry name from what is being
24108 written. */
24110 void
24111 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
24113 char buf[256];
24114 const char *name = buf;
24115 rtx base = x;
24116 HOST_WIDE_INT offset = 0;
24118 gcc_assert (!TARGET_NO_TOC_OR_PCREL);
24120 /* When the linker won't eliminate them, don't output duplicate
24121 TOC entries (this happens on AIX if there is any kind of TOC,
24122 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
24123 CODE_LABELs. */
24124 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
24126 struct toc_hash_struct *h;
24128 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
24129 time because GGC is not initialized at that point. */
24130 if (toc_hash_table == NULL)
24131 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
24133 h = ggc_alloc<toc_hash_struct> ();
24134 h->key = x;
24135 h->key_mode = mode;
24136 h->labelno = labelno;
24138 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
24139 if (*found == NULL)
24140 *found = h;
24141 else /* This is indeed a duplicate.
24142 Set this label equal to that label. */
24144 fputs ("\t.set ", file);
24145 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
24146 fprintf (file, "%d,", labelno);
24147 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
24148 fprintf (file, "%d\n", ((*found)->labelno));
24150 #ifdef HAVE_AS_TLS
24151 if (TARGET_XCOFF && SYMBOL_REF_P (x)
24152 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
24153 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
24155 fputs ("\t.set ", file);
24156 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
24157 fprintf (file, "%d,", labelno);
24158 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
24159 fprintf (file, "%d\n", ((*found)->labelno));
24161 #endif
24162 return;
24166 /* If we're going to put a double constant in the TOC, make sure it's
24167 aligned properly when strict alignment is on. */
24168 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
24169 && STRICT_ALIGNMENT
24170 && GET_MODE_BITSIZE (mode) >= 64
24171 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
24172 ASM_OUTPUT_ALIGN (file, 3);
24175 (*targetm.asm_out.internal_label) (file, "LC", labelno);
24177 /* Handle FP constants specially. Note that if we have a minimal
24178 TOC, things we put here aren't actually in the TOC, so we can allow
24179 FP constants. */
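/* For example (an illustrative sketch, assuming a 64-bit non-ELF target
   without -mminimal-toc and big-endian word order), a DFmode constant
   1.0 is emitted below as:

	.tc FD_3ff00000_0[TC],0x3ff0000000000000  */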
24180 if (CONST_DOUBLE_P (x)
24181 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
24182 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
24184 long k[4];
24186 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24187 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
24188 else
24189 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
24191 if (TARGET_64BIT)
24193 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24194 fputs (DOUBLE_INT_ASM_OP, file);
24195 else
24196 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
24197 k[0] & 0xffffffff, k[1] & 0xffffffff,
24198 k[2] & 0xffffffff, k[3] & 0xffffffff);
24199 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
24200 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
24201 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
24202 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
24203 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
24204 return;
24206 else
24208 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24209 fputs ("\t.long ", file);
24210 else
24211 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
24212 k[0] & 0xffffffff, k[1] & 0xffffffff,
24213 k[2] & 0xffffffff, k[3] & 0xffffffff);
24214 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
24215 k[0] & 0xffffffff, k[1] & 0xffffffff,
24216 k[2] & 0xffffffff, k[3] & 0xffffffff);
24217 return;
24220 else if (CONST_DOUBLE_P (x)
24221 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
24223 long k[2];
24225 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24226 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
24227 else
24228 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
24230 if (TARGET_64BIT)
24232 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24233 fputs (DOUBLE_INT_ASM_OP, file);
24234 else
24235 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
24236 k[0] & 0xffffffff, k[1] & 0xffffffff);
24237 fprintf (file, "0x%lx%08lx\n",
24238 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
24239 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
24240 return;
24242 else
24244 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24245 fputs ("\t.long ", file);
24246 else
24247 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
24248 k[0] & 0xffffffff, k[1] & 0xffffffff);
24249 fprintf (file, "0x%lx,0x%lx\n",
24250 k[0] & 0xffffffff, k[1] & 0xffffffff);
24251 return;
24254 else if (CONST_DOUBLE_P (x)
24255 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
24257 long l;
24259 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24260 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
24261 else
24262 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
24264 if (TARGET_64BIT)
24266 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24267 fputs (DOUBLE_INT_ASM_OP, file);
24268 else
24269 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
24270 if (WORDS_BIG_ENDIAN)
24271 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
24272 else
24273 fprintf (file, "0x%lx\n", l & 0xffffffff);
24274 return;
24276 else
24278 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24279 fputs ("\t.long ", file);
24280 else
24281 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
24282 fprintf (file, "0x%lx\n", l & 0xffffffff);
24283 return;
24286 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
24288 unsigned HOST_WIDE_INT low;
24289 HOST_WIDE_INT high;
24291 low = INTVAL (x) & 0xffffffff;
24292 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
24294 /* TOC entries are always Pmode-sized, so when big-endian
24295 smaller integer constants in the TOC need to be padded.
24296 (This is still a win over putting the constants in
24297 a separate constant pool, because then we'd have
24298 to have both a TOC entry _and_ the actual constant.)
24300 For a 32-bit target, CONST_INT values are loaded and shifted
24301 entirely within `low' and can be stored in one TOC entry. */
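/* For example (an illustrative sketch), an SImode constant 0x1234 in a
   64-bit big-endian TOC is shifted into the most significant half, so
   the entry prints as 0x123400000000.  */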
24303 /* It would be easy to make this work, but it doesn't now. */
24304 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
24306 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
24308 low |= high << 32;
24309 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
24310 high = (HOST_WIDE_INT) low >> 32;
24311 low &= 0xffffffff;
24314 if (TARGET_64BIT)
24316 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24317 fputs (DOUBLE_INT_ASM_OP, file);
24318 else
24319 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
24320 (long) high & 0xffffffff, (long) low & 0xffffffff);
24321 fprintf (file, "0x%lx%08lx\n",
24322 (long) high & 0xffffffff, (long) low & 0xffffffff);
24323 return;
24325 else
24327 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
24329 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24330 fputs ("\t.long ", file);
24331 else
24332 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
24333 (long) high & 0xffffffff, (long) low & 0xffffffff);
24334 fprintf (file, "0x%lx,0x%lx\n",
24335 (long) high & 0xffffffff, (long) low & 0xffffffff);
24337 else
24339 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24340 fputs ("\t.long ", file);
24341 else
24342 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
24343 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
24345 return;
24349 if (GET_CODE (x) == CONST)
24351 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
24352 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
24354 base = XEXP (XEXP (x, 0), 0);
24355 offset = INTVAL (XEXP (XEXP (x, 0), 1));
24358 switch (GET_CODE (base))
24360 case SYMBOL_REF:
24361 name = XSTR (base, 0);
24362 break;
24364 case LABEL_REF:
24365 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
24366 CODE_LABEL_NUMBER (XEXP (base, 0)));
24367 break;
24369 case CODE_LABEL:
24370 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
24371 break;
24373 default:
24374 gcc_unreachable ();
24377 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24378 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
24379 else
24381 fputs ("\t.tc ", file);
24382 RS6000_OUTPUT_BASENAME (file, name);
24384 if (offset < 0)
24385 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
24386 else if (offset)
24387 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
24389 /* Mark large TOC symbols on AIX with [TE] so they are mapped
24390 after other TOC symbols, reducing overflow of small TOC access
24391 to [TC] symbols. */
24392 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
24393 ? "[TE]," : "[TC],", file);
24396 /* Currently C++ TOC references to vtables can be emitted before it
24397 is decided whether the vtable is public or private. If this is
24398 the case, then the linker will eventually complain that there is
24399 a TOC reference to an unknown section. Thus, for vtables only,
24400 we emit the TOC reference to reference the symbol and not the
24401 section. */
24402 if (VTABLE_NAME_P (name))
24404 RS6000_OUTPUT_BASENAME (file, name);
24405 if (offset < 0)
24406 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
24407 else if (offset > 0)
24408 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
24410 else
24411 output_addr_const (file, x);
24413 #if HAVE_AS_TLS
24414 if (TARGET_XCOFF && SYMBOL_REF_P (base))
24416 switch (SYMBOL_REF_TLS_MODEL (base))
24418 case 0:
24419 break;
24420 case TLS_MODEL_LOCAL_EXEC:
24421 fputs ("@le", file);
24422 break;
24423 case TLS_MODEL_INITIAL_EXEC:
24424 fputs ("@ie", file);
24425 break;
24426 /* Use global-dynamic for local-dynamic. */
24427 case TLS_MODEL_GLOBAL_DYNAMIC:
24428 case TLS_MODEL_LOCAL_DYNAMIC:
24429 putc ('\n', file);
24430 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
24431 fputs ("\t.tc .", file);
24432 RS6000_OUTPUT_BASENAME (file, name);
24433 fputs ("[TC],", file);
24434 output_addr_const (file, x);
24435 fputs ("@m", file);
24436 break;
24437 default:
24438 gcc_unreachable ();
24441 #endif
24443 putc ('\n', file);
24446 /* Output an assembler pseudo-op to write an ASCII string of N characters
24447 starting at P to FILE.
24449 On the RS/6000, we have to do this using the .byte operation and
24450 write out special characters outside the quoted string.
24451 Also, the assembler is broken; very long strings are truncated,
24452 so we must artificially break them up early. */
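/* For example, output_ascii (file, "Hi\n", 3) emits roughly:

	.byte "Hi"
	.byte 10

   (an illustrative sketch of the expected output).  */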
24454 void
24455 output_ascii (FILE *file, const char *p, int n)
24457 char c;
24458 int i, count_string;
24459 const char *for_string = "\t.byte \"";
24460 const char *for_decimal = "\t.byte ";
24461 const char *to_close = NULL;
24463 count_string = 0;
24464 for (i = 0; i < n; i++)
24466 c = *p++;
24467 if (c >= ' ' && c < 0177)
24469 if (for_string)
24470 fputs (for_string, file);
24471 putc (c, file);
24473 /* Write two quotes to get one. */
24474 if (c == '"')
24476 putc (c, file);
24477 ++count_string;
24480 for_string = NULL;
24481 for_decimal = "\"\n\t.byte ";
24482 to_close = "\"\n";
24483 ++count_string;
24485 if (count_string >= 512)
24487 fputs (to_close, file);
24489 for_string = "\t.byte \"";
24490 for_decimal = "\t.byte ";
24491 to_close = NULL;
24492 count_string = 0;
24495 else
24497 if (for_decimal)
24498 fputs (for_decimal, file);
24499 fprintf (file, "%d", c);
24501 for_string = "\n\t.byte \"";
24502 for_decimal = ", ";
24503 to_close = "\n";
24504 count_string = 0;
24508 /* Now close the string if we have written one. Then end the line. */
24509 if (to_close)
24510 fputs (to_close, file);
24513 /* Generate a unique section name for FILENAME for a section type
24514 represented by SECTION_DESC. Output goes into BUF.
24516 SECTION_DESC can be any string, as long as it is different for each
24517 possible section type.
24519 We name the section in the same manner as xlc. The name begins with an
24520 underscore followed by the filename (after stripping any leading directory
24521 names) with the last period replaced by the string SECTION_DESC. If
24522 FILENAME does not contain a period, SECTION_DESC is appended to the end of
24523 the name. */
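/* For example, FILENAME "src/foo.c" with SECTION_DESC "bss_" yields
   "_foobss_": everything after the last period is dropped and
   non-alphanumeric characters are skipped (an illustrative sketch).  */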
24525 void
24526 rs6000_gen_section_name (char **buf, const char *filename,
24527 const char *section_desc)
24529 const char *q, *after_last_slash, *last_period = 0;
24530 char *p;
24531 int len;
24533 after_last_slash = filename;
24534 for (q = filename; *q; q++)
24536 if (*q == '/')
24537 after_last_slash = q + 1;
24538 else if (*q == '.')
24539 last_period = q;
24542 len = strlen (after_last_slash) + strlen (section_desc) + 2;
24543 *buf = (char *) xmalloc (len);
24545 p = *buf;
24546 *p++ = '_';
24548 for (q = after_last_slash; *q; q++)
24550 if (q == last_period)
24552 strcpy (p, section_desc);
24553 p += strlen (section_desc);
24554 break;
24557 else if (ISALNUM (*q))
24558 *p++ = *q;
24561 if (last_period == 0)
24562 strcpy (p, section_desc);
24563 else
24564 *p = '\0';
24567 /* Emit profile function. */
24569 void
24570 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
24572 /* Non-standard profiling for kernels, which just saves LR then calls
24573 _mcount without worrying about arg saves. The idea is to change
24574 the function prologue as little as possible as it isn't easy to
24575 account for arg save/restore code added just for _mcount. */
24576 if (TARGET_PROFILE_KERNEL)
24577 return;
24579 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24581 #ifndef NO_PROFILE_COUNTERS
24582 # define NO_PROFILE_COUNTERS 0
24583 #endif
24584 if (NO_PROFILE_COUNTERS)
24585 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24586 LCT_NORMAL, VOIDmode);
24587 else
24589 char buf[30];
24590 const char *label_name;
24591 rtx fun;
24593 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24594 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
24595 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
24597 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24598 LCT_NORMAL, VOIDmode, fun, Pmode);
24601 else if (DEFAULT_ABI == ABI_DARWIN)
24603 const char *mcount_name = RS6000_MCOUNT;
24604 int caller_addr_regno = LR_REGNO;
24606 /* Be conservative and always set this, at least for now. */
24607 crtl->uses_pic_offset_table = 1;
24609 #if TARGET_MACHO
24610 /* For PIC code, set up a stub and collect the caller's address
24611 from r0, which is where the prologue puts it. */
24612 if (MACHOPIC_INDIRECT
24613 && crtl->uses_pic_offset_table)
24614 caller_addr_regno = 0;
24615 #endif
24616 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
24617 LCT_NORMAL, VOIDmode,
24618 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
24622 /* Write function profiler code. */
24624 void
24625 output_function_profiler (FILE *file, int labelno)
24627 char buf[100];
24629 switch (DEFAULT_ABI)
24631 default:
24632 gcc_unreachable ();
24634 case ABI_V4:
24635 if (!TARGET_32BIT)
24637 warning (0, "no profiling of 64-bit code for this ABI");
24638 return;
24640 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24641 fprintf (file, "\tmflr %s\n", reg_names[0]);
24642 if (NO_PROFILE_COUNTERS)
24644 asm_fprintf (file, "\tstw %s,4(%s)\n",
24645 reg_names[0], reg_names[1]);
24647 else if (TARGET_SECURE_PLT && flag_pic)
24649 if (TARGET_LINK_STACK)
24651 char name[32];
24652 get_ppc476_thunk_name (name);
24653 asm_fprintf (file, "\tbl %s\n", name);
24655 else
24656 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
24657 asm_fprintf (file, "\tstw %s,4(%s)\n",
24658 reg_names[0], reg_names[1]);
24659 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24660 asm_fprintf (file, "\taddis %s,%s,",
24661 reg_names[12], reg_names[12]);
24662 assemble_name (file, buf);
24663 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
24664 assemble_name (file, buf);
24665 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
24667 else if (flag_pic == 1)
24669 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
24670 asm_fprintf (file, "\tstw %s,4(%s)\n",
24671 reg_names[0], reg_names[1]);
24672 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24673 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
24674 assemble_name (file, buf);
24675 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
24677 else if (flag_pic > 1)
24679 asm_fprintf (file, "\tstw %s,4(%s)\n",
24680 reg_names[0], reg_names[1]);
24681 /* Now, we need to get the address of the label. */
24682 if (TARGET_LINK_STACK)
24684 char name[32];
24685 get_ppc476_thunk_name (name);
24686 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
24687 assemble_name (file, buf);
24688 fputs ("-.\n1:", file);
24689 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24690 asm_fprintf (file, "\taddi %s,%s,4\n",
24691 reg_names[11], reg_names[11]);
24693 else
24695 fputs ("\tbcl 20,31,1f\n\t.long ", file);
24696 assemble_name (file, buf);
24697 fputs ("-.\n1:", file);
24698 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24700 asm_fprintf (file, "\tlwz %s,0(%s)\n",
24701 reg_names[0], reg_names[11]);
24702 asm_fprintf (file, "\tadd %s,%s,%s\n",
24703 reg_names[0], reg_names[0], reg_names[11]);
24705 else
24707 asm_fprintf (file, "\tlis %s,", reg_names[12]);
24708 assemble_name (file, buf);
24709 fputs ("@ha\n", file);
24710 asm_fprintf (file, "\tstw %s,4(%s)\n",
24711 reg_names[0], reg_names[1]);
24712 asm_fprintf (file, "\tla %s,", reg_names[0]);
24713 assemble_name (file, buf);
24714 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
24717 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
24718 fprintf (file, "\tbl %s%s\n",
24719 RS6000_MCOUNT, flag_pic ? "@plt" : "");
24720 break;
24722 case ABI_AIX:
24723 case ABI_ELFv2:
24724 case ABI_DARWIN:
24725 /* Don't do anything, done in output_profile_hook (). */
24726 break;
24732 /* The following variable value is the last issued insn. */
24734 static rtx_insn *last_scheduled_insn;
24736 /* The following variable helps to balance issuing of load and
24737 store instructions. */
24739 static int load_store_pendulum;
24741 /* The following variable helps pair divide insns during scheduling. */
24742 static int divide_cnt;
24743 /* The following variable helps pair and alternate vector and vector load
24744 insns during scheduling. */
24745 static int vec_pairing;
24748 /* Power4 load update and store update instructions are cracked into a
24749 load or store and an integer insn which are executed in the same cycle.
24750 Branches have their own dispatch slot which does not count against the
24751 GCC issue rate, but it changes the program flow so there are no other
24752 instructions to issue in this cycle. */
24754 static int
24755 rs6000_variable_issue_1 (rtx_insn *insn, int more)
24757 last_scheduled_insn = insn;
24758 if (GET_CODE (PATTERN (insn)) == USE
24759 || GET_CODE (PATTERN (insn)) == CLOBBER)
24761 cached_can_issue_more = more;
24762 return cached_can_issue_more;
24765 if (insn_terminates_group_p (insn, current_group))
24767 cached_can_issue_more = 0;
24768 return cached_can_issue_more;
24771 /* If the insn has no reservation, but we reach here anyway. */
24772 if (recog_memoized (insn) < 0)
24773 return more;
24775 if (rs6000_sched_groups)
24777 if (is_microcoded_insn (insn))
24778 cached_can_issue_more = 0;
24779 else if (is_cracked_insn (insn))
24780 cached_can_issue_more = more > 2 ? more - 2 : 0;
24781 else
24782 cached_can_issue_more = more - 1;
24784 return cached_can_issue_more;
24787 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
24788 return 0;
24790 cached_can_issue_more = more - 1;
24791 return cached_can_issue_more;
24794 static int
24795 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
24797 int r = rs6000_variable_issue_1 (insn, more);
24798 if (verbose)
24799 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
24800 return r;
24803 /* Adjust the cost of a scheduling dependency. Return the new cost of
24804 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
24806 static int
24807 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
24808 unsigned int)
24810 enum attr_type attr_type;
24812 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
24813 return cost;
24815 switch (dep_type)
24817 case REG_DEP_TRUE:
24819 /* Data dependency; DEP_INSN writes a register that INSN reads
24820 some cycles later. */
24822 /* Separate a load from a narrower, dependent store. */
24823 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9
24824 || rs6000_tune == PROCESSOR_FUTURE)
24825 && GET_CODE (PATTERN (insn)) == SET
24826 && GET_CODE (PATTERN (dep_insn)) == SET
24827 && MEM_P (XEXP (PATTERN (insn), 1))
24828 && MEM_P (XEXP (PATTERN (dep_insn), 0))
24829 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
24830 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
24831 return cost + 14;
24833 attr_type = get_attr_type (insn);
24835 switch (attr_type)
24837 case TYPE_JMPREG:
24838 /* Tell the first scheduling pass about the latency between
24839 a mtctr and bctr (and mtlr and br/blr). The first
24840 scheduling pass will not know about this latency since
24841 the mtctr instruction, which has the latency associated
24842 to it, will be generated by reload. */
24843 return 4;
24844 case TYPE_BRANCH:
24845 /* Leave some extra cycles between a compare and its
24846 dependent branch, to inhibit expensive mispredicts. */
24847 if ((rs6000_tune == PROCESSOR_PPC603
24848 || rs6000_tune == PROCESSOR_PPC604
24849 || rs6000_tune == PROCESSOR_PPC604e
24850 || rs6000_tune == PROCESSOR_PPC620
24851 || rs6000_tune == PROCESSOR_PPC630
24852 || rs6000_tune == PROCESSOR_PPC750
24853 || rs6000_tune == PROCESSOR_PPC7400
24854 || rs6000_tune == PROCESSOR_PPC7450
24855 || rs6000_tune == PROCESSOR_PPCE5500
24856 || rs6000_tune == PROCESSOR_PPCE6500
24857 || rs6000_tune == PROCESSOR_POWER4
24858 || rs6000_tune == PROCESSOR_POWER5
24859 || rs6000_tune == PROCESSOR_POWER7
24860 || rs6000_tune == PROCESSOR_POWER8
24861 || rs6000_tune == PROCESSOR_POWER9
24862 || rs6000_tune == PROCESSOR_FUTURE
24863 || rs6000_tune == PROCESSOR_CELL)
24864 && recog_memoized (dep_insn)
24865 && (INSN_CODE (dep_insn) >= 0))
24867 switch (get_attr_type (dep_insn))
24869 case TYPE_CMP:
24870 case TYPE_FPCOMPARE:
24871 case TYPE_CR_LOGICAL:
24872 return cost + 2;
24873 case TYPE_EXTS:
24874 case TYPE_MUL:
24875 if (get_attr_dot (dep_insn) == DOT_YES)
24876 return cost + 2;
24877 else
24878 break;
24879 case TYPE_SHIFT:
24880 if (get_attr_dot (dep_insn) == DOT_YES
24881 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
24882 return cost + 2;
24883 else
24884 break;
24885 default:
24886 break;
24888 break;
24890 case TYPE_STORE:
24891 case TYPE_FPSTORE:
24892 if ((rs6000_tune == PROCESSOR_POWER6)
24893 && recog_memoized (dep_insn)
24894 && (INSN_CODE (dep_insn) >= 0))
24897 if (GET_CODE (PATTERN (insn)) != SET)
24898 /* If this happens, we have to extend this to schedule
24899 optimally. Return default for now. */
24900 return cost;
24902 /* Adjust the cost for the case where the value written
24903 by a fixed point operation is used as the address
24904 gen value on a store. */
24905 switch (get_attr_type (dep_insn))
24907 case TYPE_LOAD:
24908 case TYPE_CNTLZ:
24910 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24911 return get_attr_sign_extend (dep_insn)
24912 == SIGN_EXTEND_YES ? 6 : 4;
24913 break;
24915 case TYPE_SHIFT:
24917 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24918 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
24919 6 : 3;
24920 break;
24922 case TYPE_INTEGER:
24923 case TYPE_ADD:
24924 case TYPE_LOGICAL:
24925 case TYPE_EXTS:
24926 case TYPE_INSERT:
24928 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24929 return 3;
24930 break;
24932 case TYPE_STORE:
24933 case TYPE_FPLOAD:
24934 case TYPE_FPSTORE:
24936 if (get_attr_update (dep_insn) == UPDATE_YES
24937 && ! rs6000_store_data_bypass_p (dep_insn, insn))
24938 return 3;
24939 break;
24941 case TYPE_MUL:
24943 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24944 return 17;
24945 break;
24947 case TYPE_DIV:
24949 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24950 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
24951 break;
24953 default:
24954 break;
24957 break;
24959 case TYPE_LOAD:
24960 if ((rs6000_tune == PROCESSOR_POWER6)
24961 && recog_memoized (dep_insn)
24962 && (INSN_CODE (dep_insn) >= 0))
24965 /* Adjust the cost for the case where the value written
24966 by a fixed point instruction is used within the address
24967 gen portion of a subsequent load(u)(x). */
24968 switch (get_attr_type (dep_insn))
24970 case TYPE_LOAD:
24971 case TYPE_CNTLZ:
24973 if (set_to_load_agen (dep_insn, insn))
24974 return get_attr_sign_extend (dep_insn)
24975 == SIGN_EXTEND_YES ? 6 : 4;
24976 break;
24978 case TYPE_SHIFT:
24980 if (set_to_load_agen (dep_insn, insn))
24981 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
24982 6 : 3;
24983 break;
24985 case TYPE_INTEGER:
24986 case TYPE_ADD:
24987 case TYPE_LOGICAL:
24988 case TYPE_EXTS:
24989 case TYPE_INSERT:
24991 if (set_to_load_agen (dep_insn, insn))
24992 return 3;
24993 break;
24995 case TYPE_STORE:
24996 case TYPE_FPLOAD:
24997 case TYPE_FPSTORE:
24999 if (get_attr_update (dep_insn) == UPDATE_YES
25000 && set_to_load_agen (dep_insn, insn))
25001 return 3;
25002 break;
25004 case TYPE_MUL:
25006 if (set_to_load_agen (dep_insn, insn))
25007 return 17;
25008 break;
25010 case TYPE_DIV:
25012 if (set_to_load_agen (dep_insn, insn))
25013 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
25014 break;
25016 default:
25017 break;
25020 break;
25022 case TYPE_FPLOAD:
25023 if ((rs6000_tune == PROCESSOR_POWER6)
25024 && get_attr_update (insn) == UPDATE_NO
25025 && recog_memoized (dep_insn)
25026 && (INSN_CODE (dep_insn) >= 0)
25027 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
25028 return 2;
25030 default:
25031 break;
25034 /* Fall out to return default cost. */
25036 break;
25038 case REG_DEP_OUTPUT:
25039 /* Output dependency; DEP_INSN writes a register that INSN writes some
25040 cycles later. */
25041 if ((rs6000_tune == PROCESSOR_POWER6)
25042 && recog_memoized (dep_insn)
25043 && (INSN_CODE (dep_insn) >= 0))
25045 attr_type = get_attr_type (insn);
25047 switch (attr_type)
25049 case TYPE_FP:
25050 case TYPE_FPSIMPLE:
25051 if (get_attr_type (dep_insn) == TYPE_FP
25052 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
25053 return 1;
25054 break;
25055 case TYPE_FPLOAD:
25056 if (get_attr_update (insn) == UPDATE_NO
25057 && get_attr_type (dep_insn) == TYPE_MFFGPR)
25058 return 2;
25059 break;
25060 default:
25061 break;
25064 /* Fall through, no cost for output dependency. */
25065 /* FALLTHRU */
25067 case REG_DEP_ANTI:
25068 /* Anti dependency; DEP_INSN reads a register that INSN writes some
25069 cycles later. */
25070 return 0;
25072 default:
25073 gcc_unreachable ();
25076 return cost;
25079 /* Debug version of rs6000_adjust_cost. */
25081 static int
25082 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
25083 int cost, unsigned int dw)
25085 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
25087 if (ret != cost)
25089 const char *dep;
25091 switch (dep_type)
25093 default: dep = "unknown dependency"; break;
25094 case REG_DEP_TRUE: dep = "data dependency"; break;
25095 case REG_DEP_OUTPUT: dep = "output dependency"; break;
25096 case REG_DEP_ANTI: dep = "anti dependency"; break;
25099 fprintf (stderr,
25100 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
25101 "%s, insn:\n", ret, cost, dep);
25103 debug_rtx (insn);
25106 return ret;
25109 /* The function returns true if INSN is microcoded.
25110 Return false otherwise. */
25112 static bool
25113 is_microcoded_insn (rtx_insn *insn)
25115 if (!insn || !NONDEBUG_INSN_P (insn)
25116 || GET_CODE (PATTERN (insn)) == USE
25117 || GET_CODE (PATTERN (insn)) == CLOBBER)
25118 return false;
25120 if (rs6000_tune == PROCESSOR_CELL)
25121 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
25123 if (rs6000_sched_groups
25124 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
25126 enum attr_type type = get_attr_type (insn);
25127 if ((type == TYPE_LOAD
25128 && get_attr_update (insn) == UPDATE_YES
25129 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
25130 || ((type == TYPE_LOAD || type == TYPE_STORE)
25131 && get_attr_update (insn) == UPDATE_YES
25132 && get_attr_indexed (insn) == INDEXED_YES)
25133 || type == TYPE_MFCR)
25134 return true;
25137 return false;
25140 /* The function returns true if INSN is cracked into 2 instructions
25141 by the processor (and therefore occupies 2 issue slots). */
25143 static bool
25144 is_cracked_insn (rtx_insn *insn)
25146 if (!insn || !NONDEBUG_INSN_P (insn)
25147 || GET_CODE (PATTERN (insn)) == USE
25148 || GET_CODE (PATTERN (insn)) == CLOBBER)
25149 return false;
25151 if (rs6000_sched_groups
25152 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
25154 enum attr_type type = get_attr_type (insn);
25155 if ((type == TYPE_LOAD
25156 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
25157 && get_attr_update (insn) == UPDATE_NO)
25158 || (type == TYPE_LOAD
25159 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
25160 && get_attr_update (insn) == UPDATE_YES
25161 && get_attr_indexed (insn) == INDEXED_NO)
25162 || (type == TYPE_STORE
25163 && get_attr_update (insn) == UPDATE_YES
25164 && get_attr_indexed (insn) == INDEXED_NO)
25165 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
25166 && get_attr_update (insn) == UPDATE_YES)
25167 || (type == TYPE_CR_LOGICAL
25168 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
25169 || (type == TYPE_EXTS
25170 && get_attr_dot (insn) == DOT_YES)
25171 || (type == TYPE_SHIFT
25172 && get_attr_dot (insn) == DOT_YES
25173 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
25174 || (type == TYPE_MUL
25175 && get_attr_dot (insn) == DOT_YES)
25176 || type == TYPE_DIV
25177 || (type == TYPE_INSERT
25178 && get_attr_size (insn) == SIZE_32))
25179 return true;
25182 return false;
25185 /* The function returns true if INSN can be issued only from
25186 the branch slot. */
25188 static bool
25189 is_branch_slot_insn (rtx_insn *insn)
25191 if (!insn || !NONDEBUG_INSN_P (insn)
25192 || GET_CODE (PATTERN (insn)) == USE
25193 || GET_CODE (PATTERN (insn)) == CLOBBER)
25194 return false;
25196 if (rs6000_sched_groups)
25198 enum attr_type type = get_attr_type (insn);
25199 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
25200 return true;
25201 return false;
25204 return false;
25207 /* The function returns true if OUT_INSN sets a value that is
25208 used in the address generation computation of IN_INSN. */
25209 static bool
25210 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
25212 rtx out_set, in_set;
25214 /* For performance reasons, only handle the simple case where
25215 both loads are a single_set. */
25216 out_set = single_set (out_insn);
25217 if (out_set)
25219 in_set = single_set (in_insn);
25220 if (in_set)
25221 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
25224 return false;
25227 /* Try to determine base/offset/size parts of the given MEM.
25228 Return true if successful, false if the values could not all
25229 be determined.
25231 This function only looks for REG or REG+CONST address forms.
25232 REG+REG address form will return false. */
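/* For example, a MEM of the form (mem:DI (plus:DI (reg:DI 3)
   (const_int 8))) yields *base = r3, *offset = 8 and *size = 8
   (illustrative; the size comes from MEM_SIZE).  */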
25234 static bool
25235 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
25236 HOST_WIDE_INT *size)
25238 rtx addr_rtx;
25239 if (MEM_SIZE_KNOWN_P (mem))
25240 *size = MEM_SIZE (mem);
25241 else
25242 return false;
25244 addr_rtx = (XEXP (mem, 0));
25245 if (GET_CODE (addr_rtx) == PRE_MODIFY)
25246 addr_rtx = XEXP (addr_rtx, 1);
25248 *offset = 0;
25249 while (GET_CODE (addr_rtx) == PLUS
25250 && CONST_INT_P (XEXP (addr_rtx, 1)))
25252 *offset += INTVAL (XEXP (addr_rtx, 1));
25253 addr_rtx = XEXP (addr_rtx, 0);
25255 if (!REG_P (addr_rtx))
25256 return false;
25258 *base = addr_rtx;
25259 return true;
25262 /* The function returns true if the target storage location of
25263 MEM1 is adjacent to the target storage location of MEM2. */
25266 static bool
25267 adjacent_mem_locations (rtx mem1, rtx mem2)
25269 rtx reg1, reg2;
25270 HOST_WIDE_INT off1, size1, off2, size2;
25272 if (get_memref_parts (mem1, &reg1, &off1, &size1)
25273 && get_memref_parts (mem2, &reg2, &off2, &size2))
25274 return ((REGNO (reg1) == REGNO (reg2))
25275 && ((off1 + size1 == off2)
25276 || (off2 + size2 == off1)));
25278 return false;
25281 /* This function returns true if it can be determined that the two MEM
25282 locations overlap by at least 1 byte based on base reg/offset/size. */
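/* For example, an 8-byte store at r3+0 overlaps a 4-byte load at r3+4:
   0 <= 4 and 0 + 8 > 4 (an illustrative check).  */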
25284 static bool
25285 mem_locations_overlap (rtx mem1, rtx mem2)
25287 rtx reg1, reg2;
25288 HOST_WIDE_INT off1, size1, off2, size2;
25290 if (get_memref_parts (mem1, &reg1, &off1, &size1)
25291 && get_memref_parts (mem2, &reg2, &off2, &size2))
25292 return ((REGNO (reg1) == REGNO (reg2))
25293 && (((off1 <= off2) && (off1 + size1 > off2))
25294 || ((off2 <= off1) && (off2 + size2 > off1))));
25296 return false;
25299 /* A C statement (sans semicolon) to update the integer scheduling
25300 priority INSN_PRIORITY (INSN). Increase the priority to execute the
25301 INSN earlier, reduce the priority to execute INSN later. Do not
25302 define this macro if you do not need to adjust the scheduling
25303 priorities of insns. */
25305 static int
25306 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
25308 rtx load_mem, str_mem;
25309 /* On machines (like the 750) which have asymmetric integer units,
25310 where one integer unit can do multiply and divides and the other
25311 can't, reduce the priority of multiply/divide so it is scheduled
25312 before other integer operations. */
25314 #if 0
25315 if (! INSN_P (insn))
25316 return priority;
25318 if (GET_CODE (PATTERN (insn)) == USE)
25319 return priority;
25321 switch (rs6000_tune) {
25322 case PROCESSOR_PPC750:
25323 switch (get_attr_type (insn))
25325 default:
25326 break;
25328 case TYPE_MUL:
25329 case TYPE_DIV:
25330 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
25331 priority, priority);
25332 if (priority >= 0 && priority < 0x01000000)
25333 priority >>= 3;
25334 break;
25337 #endif
25339 if (insn_must_be_first_in_group (insn)
25340 && reload_completed
25341 && current_sched_info->sched_max_insns_priority
25342 && rs6000_sched_restricted_insns_priority)
25345 /* Prioritize insns that can be dispatched only in the first
25346 dispatch slot. */
25347 if (rs6000_sched_restricted_insns_priority == 1)
25348 /* Attach highest priority to insn. This means that in
25349 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
25350 precede 'priority' (critical path) considerations. */
25351 return current_sched_info->sched_max_insns_priority;
25352 else if (rs6000_sched_restricted_insns_priority == 2)
25353 /* Increase priority of insn by a minimal amount. This means that in
25354 haifa-sched.c:ready_sort(), only 'priority' (critical path)
25355 considerations precede dispatch-slot restriction considerations. */
25356 return (priority + 1);
25359 if (rs6000_tune == PROCESSOR_POWER6
25360 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
25361 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
25362 /* Attach highest priority to insn if the scheduler has just issued two
25363 stores and this instruction is a load, or two loads and this instruction
25364 is a store. Power6 wants loads and stores scheduled alternately
25365 when possible. */
25366 return current_sched_info->sched_max_insns_priority;
25368 return priority;
25371 /* Return true if the instruction is nonpipelined on the Cell. */
25372 static bool
25373 is_nonpipeline_insn (rtx_insn *insn)
25375 enum attr_type type;
25376 if (!insn || !NONDEBUG_INSN_P (insn)
25377 || GET_CODE (PATTERN (insn)) == USE
25378 || GET_CODE (PATTERN (insn)) == CLOBBER)
25379 return false;
25381 type = get_attr_type (insn);
25382 if (type == TYPE_MUL
25383 || type == TYPE_DIV
25384 || type == TYPE_SDIV
25385 || type == TYPE_DDIV
25386 || type == TYPE_SSQRT
25387 || type == TYPE_DSQRT
25388 || type == TYPE_MFCR
25389 || type == TYPE_MFCRF
25390 || type == TYPE_MFJMPR)
25392 return true;
25394 return false;
25398 /* Return how many instructions the machine can issue per cycle. */
25400 static int
25401 rs6000_issue_rate (void)
25403 /* Unless scheduling for register pressure, use issue rate of 1 for
25404 first scheduling pass to decrease degradation. */
25405 if (!reload_completed && !flag_sched_pressure)
25406 return 1;
25408 switch (rs6000_tune) {
25409 case PROCESSOR_RS64A:
25410 case PROCESSOR_PPC601: /* ? */
25411 case PROCESSOR_PPC7450:
25412 return 3;
25413 case PROCESSOR_PPC440:
25414 case PROCESSOR_PPC603:
25415 case PROCESSOR_PPC750:
25416 case PROCESSOR_PPC7400:
25417 case PROCESSOR_PPC8540:
25418 case PROCESSOR_PPC8548:
25419 case PROCESSOR_CELL:
25420 case PROCESSOR_PPCE300C2:
25421 case PROCESSOR_PPCE300C3:
25422 case PROCESSOR_PPCE500MC:
25423 case PROCESSOR_PPCE500MC64:
25424 case PROCESSOR_PPCE5500:
25425 case PROCESSOR_PPCE6500:
25426 case PROCESSOR_TITAN:
25427 return 2;
25428 case PROCESSOR_PPC476:
25429 case PROCESSOR_PPC604:
25430 case PROCESSOR_PPC604e:
25431 case PROCESSOR_PPC620:
25432 case PROCESSOR_PPC630:
25433 return 4;
25434 case PROCESSOR_POWER4:
25435 case PROCESSOR_POWER5:
25436 case PROCESSOR_POWER6:
25437 case PROCESSOR_POWER7:
25438 return 5;
25439 case PROCESSOR_POWER8:
25440 return 7;
25441 case PROCESSOR_POWER9:
25442 case PROCESSOR_FUTURE:
25443 return 6;
25444 default:
25445 return 1;
25449 /* Return how many instructions to look ahead for better insn
25450 scheduling. */
25452 static int
25453 rs6000_use_sched_lookahead (void)
25455 switch (rs6000_tune)
25457 case PROCESSOR_PPC8540:
25458 case PROCESSOR_PPC8548:
25459 return 4;
25461 case PROCESSOR_CELL:
25462 return (reload_completed ? 8 : 0);
25464 default:
25465 return 0;
25469 /* We are choosing insn from the ready queue. Return zero if INSN can be
25470 chosen. */
25471 static int
25472 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
25474 if (ready_index == 0)
25475 return 0;
25477 if (rs6000_tune != PROCESSOR_CELL)
25478 return 0;
25480 gcc_assert (insn != NULL_RTX && INSN_P (insn));
25482 if (!reload_completed
25483 || is_nonpipeline_insn (insn)
25484 || is_microcoded_insn (insn))
25485 return 1;
25487 return 0;
25490 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
25491 and return true. */
25493 static bool
25494 find_mem_ref (rtx pat, rtx *mem_ref)
25496 const char * fmt;
25497 int i, j;
25499 /* stack_tie does not produce any real memory traffic. */
25500 if (tie_operand (pat, VOIDmode))
25501 return false;
25503 if (MEM_P (pat))
25505 *mem_ref = pat;
25506 return true;
25509 /* Recursively process the pattern. */
25510 fmt = GET_RTX_FORMAT (GET_CODE (pat));
25512 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
25514 if (fmt[i] == 'e')
25516 if (find_mem_ref (XEXP (pat, i), mem_ref))
25517 return true;
25519 else if (fmt[i] == 'E')
25520 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
25522 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
25523 return true;
25527 return false;
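/* Illustrative sketch (not part of this file): the same depth-first walk
   over a toy expression tree, mirroring how find_mem_ref descends through
   the 'e' (single rtx) and 'E' (rtx vector) operand codes reported by
   GET_RTX_FORMAT.  All names below are hypothetical.  */
#if 0
struct toy_expr
{
  int is_mem;                   /* Nonzero for a MEM-like leaf.  */
  int n_ops;                    /* Number of sub-expressions.  */
  struct toy_expr **ops;        /* The sub-expressions themselves.  */
};

static int
toy_find_mem (struct toy_expr *e, struct toy_expr **mem_ref)
{
  int i;

  if (e->is_mem)
    {
      *mem_ref = e;
      return 1;
    }

  for (i = e->n_ops - 1; i >= 0; i--)
    if (toy_find_mem (e->ops[i], mem_ref))
      return 1;

  return 0;
}
#endif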
25530 /* Determine if PAT is a PATTERN of a load insn. */
25532 static bool
25533 is_load_insn1 (rtx pat, rtx *load_mem)
25535 if (!pat || pat == NULL_RTX)
25536 return false;
25538 if (GET_CODE (pat) == SET)
25539 return find_mem_ref (SET_SRC (pat), load_mem);
25541 if (GET_CODE (pat) == PARALLEL)
25543 int i;
25545 for (i = 0; i < XVECLEN (pat, 0); i++)
25546 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
25547 return true;
25550 return false;
25553 /* Determine if INSN loads from memory. */
25555 static bool
25556 is_load_insn (rtx insn, rtx *load_mem)
25558 if (!insn || !INSN_P (insn))
25559 return false;
25561 if (CALL_P (insn))
25562 return false;
25564 return is_load_insn1 (PATTERN (insn), load_mem);
25567 /* Determine if PAT is a PATTERN of a store insn. */
25569 static bool
25570 is_store_insn1 (rtx pat, rtx *str_mem)
25572 if (!pat || pat == NULL_RTX)
25573 return false;
25575 if (GET_CODE (pat) == SET)
25576 return find_mem_ref (SET_DEST (pat), str_mem);
25578 if (GET_CODE (pat) == PARALLEL)
25580 int i;
25582 for (i = 0; i < XVECLEN (pat, 0); i++)
25583 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
25584 return true;
25587 return false;
25590 /* Determine if INSN stores to memory. */
25592 static bool
25593 is_store_insn (rtx insn, rtx *str_mem)
25595 if (!insn || !INSN_P (insn))
25596 return false;
25598 return is_store_insn1 (PATTERN (insn), str_mem);
25601 /* Return whether TYPE is a Power9 pairable vector instruction type. */
25603 static bool
25604 is_power9_pairable_vec_type (enum attr_type type)
25606 switch (type)
25608 case TYPE_VECSIMPLE:
25609 case TYPE_VECCOMPLEX:
25610 case TYPE_VECDIV:
25611 case TYPE_VECCMP:
25612 case TYPE_VECPERM:
25613 case TYPE_VECFLOAT:
25614 case TYPE_VECFDIV:
25615 case TYPE_VECDOUBLE:
25616 return true;
25617 default:
25618 break;
25620 return false;
25623 /* Returns whether the dependence between INSN and NEXT is considered
25624 costly by the given target. */
25626 static bool
25627 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
25629 rtx insn;
25630 rtx next;
25631 rtx load_mem, str_mem;
25633 /* If the flag is not enabled, no dependence is considered costly;
25634 allow all dependent insns in the same group.
25635 This is the most aggressive option. */
25636 if (rs6000_sched_costly_dep == no_dep_costly)
25637 return false;
25639 /* If the flag is set to 1, a dependence is always considered costly;
25640 do not allow dependent instructions in the same group.
25641 This is the most conservative option. */
25642 if (rs6000_sched_costly_dep == all_deps_costly)
25643 return true;
25645 insn = DEP_PRO (dep);
25646 next = DEP_CON (dep);
25648 if (rs6000_sched_costly_dep == store_to_load_dep_costly
25649 && is_load_insn (next, &load_mem)
25650 && is_store_insn (insn, &str_mem))
25651 /* Prevent load after store in the same group. */
25652 return true;
25654 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
25655 && is_load_insn (next, &load_mem)
25656 && is_store_insn (insn, &str_mem)
25657 && DEP_TYPE (dep) == REG_DEP_TRUE
25658 && mem_locations_overlap(str_mem, load_mem))
25659 /* Prevent load after store in the same group if it is a true
25660 dependence. */
25661 return true;
25663 /* The flag is set to X; dependences with latency >= X are considered costly,
25664 and will not be scheduled in the same group. */
25665 if (rs6000_sched_costly_dep <= max_dep_latency
25666 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
25667 return true;
25669 return false;
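/* Illustrative sketch (not part of this file): the costly-dependence
   policies above reduced to a decision function over plain ints.  The
   enumerators are local to the sketch and only mirror the roles of
   no_dep_costly, all_deps_costly, store_to_load_dep_costly,
   true_store_to_load_dep_costly and the numeric latency threshold.  */
#if 0
enum toy_dep_policy
{
  TOY_NO_DEP_COSTLY,
  TOY_ALL_DEPS_COSTLY,
  TOY_STORE_TO_LOAD,
  TOY_TRUE_STORE_TO_LOAD,
  TOY_LATENCY_THRESHOLD
};

static int
toy_costly_dep_p (enum toy_dep_policy policy, int threshold,
                  int cost, int distance,
                  int store_to_load_p, int true_dep_overlap_p)
{
  switch (policy)
    {
    case TOY_NO_DEP_COSTLY:
      return 0;                         /* Most aggressive: allow all.  */
    case TOY_ALL_DEPS_COSTLY:
      return 1;                         /* Most conservative: allow none.  */
    case TOY_STORE_TO_LOAD:
      return store_to_load_p;
    case TOY_TRUE_STORE_TO_LOAD:
      return store_to_load_p && true_dep_overlap_p;
    case TOY_LATENCY_THRESHOLD:
      return cost - distance >= threshold;
    }
  return 0;
}
#endif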
25672 /* Return the next insn after INSN that is found before TAIL is reached,
25673 skipping any "non-active" insns, i.e. insns that will not actually occupy
25674 an issue slot. Return NULL_RTX if such an insn is not found. */
25676 static rtx_insn *
25677 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
25679 if (insn == NULL_RTX || insn == tail)
25680 return NULL;
25682 while (1)
25684 insn = NEXT_INSN (insn);
25685 if (insn == NULL_RTX || insn == tail)
25686 return NULL;
25688 if (CALL_P (insn)
25689 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
25690 || (NONJUMP_INSN_P (insn)
25691 && GET_CODE (PATTERN (insn)) != USE
25692 && GET_CODE (PATTERN (insn)) != CLOBBER
25693 && INSN_CODE (insn) != CODE_FOR_stack_tie))
25694 break;
25696 return insn;
25699 /* Do Power9 specific sched_reorder2 reordering of ready list. */
25701 static int
25702 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
25704 int pos;
25705 int i;
25706 rtx_insn *tmp;
25707 enum attr_type type, type2;
25709 type = get_attr_type (last_scheduled_insn);
25711 /* Try to issue fixed-point divides back-to-back in pairs so they will be
25712 routed to separate execution units and execute in parallel. */
25713 if (type == TYPE_DIV && divide_cnt == 0)
25715 /* First divide has been scheduled. */
25716 divide_cnt = 1;
25718 /* Scan the ready list looking for another divide, if found move it
25719 to the end of the list so it is chosen next. */
25720 pos = lastpos;
25721 while (pos >= 0)
25723 if (recog_memoized (ready[pos]) >= 0
25724 && get_attr_type (ready[pos]) == TYPE_DIV)
25726 tmp = ready[pos];
25727 for (i = pos; i < lastpos; i++)
25728 ready[i] = ready[i + 1];
25729 ready[lastpos] = tmp;
25730 break;
25732 pos--;
25735 else
25737 /* Last insn was the 2nd divide or not a divide, reset the counter. */
25738 divide_cnt = 0;
25740 /* The best dispatch throughput for vector and vector load insns can be
25741 achieved by interleaving a vector and vector load such that they'll
25742 dispatch to the same superslice. If this pairing cannot be achieved
25743 then it is best to pair vector insns together and vector load insns
25744 together.
25746 To aid in this pairing, vec_pairing maintains the current state with
25747 the following values:
25749 0 : Initial state, no vecload/vector pairing has been started.
25751 1 : A vecload or vector insn has been issued and a candidate for
25752 pairing has been found and moved to the end of the ready
25753 list. */
25754 if (type == TYPE_VECLOAD)
25756 /* Issued a vecload. */
25757 if (vec_pairing == 0)
25759 int vecload_pos = -1;
25760 /* We issued a single vecload, look for a vector insn to pair it
25761 with. If one isn't found, try to pair another vecload. */
25762 pos = lastpos;
25763 while (pos >= 0)
25765 if (recog_memoized (ready[pos]) >= 0)
25767 type2 = get_attr_type (ready[pos]);
25768 if (is_power9_pairable_vec_type (type2))
25770 /* Found a vector insn to pair with, move it to the
25771 end of the ready list so it is scheduled next. */
25772 tmp = ready[pos];
25773 for (i = pos; i < lastpos; i++)
25774 ready[i] = ready[i + 1];
25775 ready[lastpos] = tmp;
25776 vec_pairing = 1;
25777 return cached_can_issue_more;
25779 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
25780 /* Remember position of first vecload seen. */
25781 vecload_pos = pos;
25783 pos--;
25785 if (vecload_pos >= 0)
25787 /* Didn't find a vector to pair with but did find a vecload,
25788 move it to the end of the ready list. */
25789 tmp = ready[vecload_pos];
25790 for (i = vecload_pos; i < lastpos; i++)
25791 ready[i] = ready[i + 1];
25792 ready[lastpos] = tmp;
25793 vec_pairing = 1;
25794 return cached_can_issue_more;
25798 else if (is_power9_pairable_vec_type (type))
25800 /* Issued a vector operation. */
25801 if (vec_pairing == 0)
25803 int vec_pos = -1;
25804 /* We issued a single vector insn, look for a vecload to pair it
25805 with. If one isn't found, try to pair another vector. */
25806 pos = lastpos;
25807 while (pos >= 0)
25809 if (recog_memoized (ready[pos]) >= 0)
25811 type2 = get_attr_type (ready[pos]);
25812 if (type2 == TYPE_VECLOAD)
25814 /* Found a vecload insn to pair with, move it to the
25815 end of the ready list so it is scheduled next. */
25816 tmp = ready[pos];
25817 for (i = pos; i < lastpos; i++)
25818 ready[i] = ready[i + 1];
25819 ready[lastpos] = tmp;
25820 vec_pairing = 1;
25821 return cached_can_issue_more;
25823 else if (is_power9_pairable_vec_type (type2)
25824 && vec_pos == -1)
25825 /* Remember position of first vector insn seen. */
25826 vec_pos = pos;
25828 pos--;
25830 if (vec_pos >= 0)
25832 /* Didn't find a vecload to pair with but did find a vector
25833 insn, move it to the end of the ready list. */
25834 tmp = ready[vec_pos];
25835 for (i = vec_pos; i < lastpos; i++)
25836 ready[i] = ready[i + 1];
25837 ready[lastpos] = tmp;
25838 vec_pairing = 1;
25839 return cached_can_issue_more;
25844 /* We've either finished a vec/vecload pair, couldn't find an insn to
25845 continue the current pair, or the last insn had nothing to do
25846 with pairing. In any case, reset the state. */
25847 vec_pairing = 0;
25850 return cached_can_issue_more;
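/* Illustrative sketch (not part of this file): the "move the entry at POS
   to the end of the ready array" rotation that power9_sched_reorder2
   repeats for divides, vector insns and vecloads.  The last slot is the
   next insn the scheduler will pick.  Hypothetical helper over ints.  */
#if 0
static void
toy_rotate_to_end (int *ready, int pos, int lastpos)
{
  int i, tmp = ready[pos];

  for (i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif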
25853 /* We are about to begin issuing insns for this clock cycle. */
25855 static int
25856 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
25857 rtx_insn **ready ATTRIBUTE_UNUSED,
25858 int *pn_ready ATTRIBUTE_UNUSED,
25859 int clock_var ATTRIBUTE_UNUSED)
25861 int n_ready = *pn_ready;
25863 if (sched_verbose)
25864 fprintf (dump, "// rs6000_sched_reorder :\n");
25866 /* Reorder the ready list if the second-to-last ready insn
25867 is a nonpipelined insn.
25868 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
25870 if (is_nonpipeline_insn (ready[n_ready - 1])
25871 && (recog_memoized (ready[n_ready - 2]) > 0))
25872 /* Simply swap first two insns. */
25873 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
25876 if (rs6000_tune == PROCESSOR_POWER6)
25877 load_store_pendulum = 0;
25879 return rs6000_issue_rate ();
25882 /* Like rs6000_sched_reorder, but called after issuing each insn. */
25884 static int
25885 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
25886 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
25888 if (sched_verbose)
25889 fprintf (dump, "// rs6000_sched_reorder2 :\n");
25891 /* For Power6, we need to handle some special cases to try and keep the
25892 store queue from overflowing and triggering expensive flushes.
25894 This code monitors how load and store instructions are being issued
25895 and skews the ready list one way or the other to increase the likelihood
25896 that a desired instruction is issued at the proper time.
25898 A couple of things are done. First, we maintain a "load_store_pendulum"
25899 to track the current state of load/store issue.
25901 - If the pendulum is at zero, then no loads or stores have been
25902 issued in the current cycle so we do nothing.
25904 - If the pendulum is 1, then a single load has been issued in this
25905 cycle and we attempt to locate another load in the ready list to
25906 issue with it.
25908 - If the pendulum is -2, then two stores have already been
25909 issued in this cycle, so we increase the priority of the first load
25910 in the ready list to increase its likelihood of being chosen first
25911 in the next cycle.
25913 - If the pendulum is -1, then a single store has been issued in this
25914 cycle and we attempt to locate another store in the ready list to
25915 issue with it, preferring a store to an adjacent memory location to
25916 facilitate store pairing in the store queue.
25918 - If the pendulum is 2, then two loads have already been
25919 issued in this cycle, so we increase the priority of the first store
25920 in the ready list to increase its likelihood of being chosen first
25921 in the next cycle.
25923 - If the pendulum < -2 or > 2, then do nothing.
25925 Note: This code covers the most common scenarios. There exist
25926 non-load/store instructions which make use of the LSU and which
25927 would need to be accounted for to strictly model the behavior
25928 of the machine. Those instructions are currently unaccounted
25929 for to help minimize compile time overhead of this code.
25931 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
25933 int pos;
25934 int i;
25935 rtx_insn *tmp;
25936 rtx load_mem, str_mem;
25938 if (is_store_insn (last_scheduled_insn, &str_mem))
25939 /* Issuing a store, swing the load_store_pendulum to the left. */
25940 load_store_pendulum--;
25941 else if (is_load_insn (last_scheduled_insn, &load_mem))
25942 /* Issuing a load, swing the load_store_pendulum to the right. */
25943 load_store_pendulum++;
25944 else
25945 return cached_can_issue_more;
25947 /* If the pendulum is balanced, or there is only one instruction on
25948 the ready list, then all is well, so return. */
25949 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
25950 return cached_can_issue_more;
25952 if (load_store_pendulum == 1)
25954 /* A load has been issued in this cycle. Scan the ready list
25955 for another load to issue with it */
25956 pos = *pn_ready-1;
25958 while (pos >= 0)
25960 if (is_load_insn (ready[pos], &load_mem))
25962 /* Found a load. Move it to the head of the ready list,
25963 and adjust its priority so that it is more likely to
25964 stay there */
25965 tmp = ready[pos];
25966 for (i=pos; i<*pn_ready-1; i++)
25967 ready[i] = ready[i + 1];
25968 ready[*pn_ready-1] = tmp;
25970 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25971 INSN_PRIORITY (tmp)++;
25972 break;
25974 pos--;
25977 else if (load_store_pendulum == -2)
25979 /* Two stores have been issued in this cycle. Increase the
25980 priority of the first load in the ready list to favor it for
25981 issuing in the next cycle. */
25982 pos = *pn_ready-1;
25984 while (pos >= 0)
25986 if (is_load_insn (ready[pos], &load_mem)
25987 && !sel_sched_p ()
25988 && INSN_PRIORITY_KNOWN (ready[pos]))
25990 INSN_PRIORITY (ready[pos])++;
25992 /* Adjust the pendulum to account for the fact that a load
25993 was found and increased in priority. This is to prevent
25994 increasing the priority of multiple loads */
25995 load_store_pendulum--;
25997 break;
25999 pos--;
26002 else if (load_store_pendulum == -1)
26004 /* A store has been issued in this cycle. Scan the ready list for
26005 another store to issue with it, preferring a store to an adjacent
26006 memory location */
26007 int first_store_pos = -1;
26009 pos = *pn_ready-1;
26011 while (pos >= 0)
26013 if (is_store_insn (ready[pos], &str_mem))
26015 rtx str_mem2;
26016 /* Maintain the index of the first store found on the
26017 list */
26018 if (first_store_pos == -1)
26019 first_store_pos = pos;
26021 if (is_store_insn (last_scheduled_insn, &str_mem2)
26022 && adjacent_mem_locations (str_mem, str_mem2))
26024 /* Found an adjacent store. Move it to the head of the
26025 ready list, and adjust its priority so that it is
26026 more likely to stay there */
26027 tmp = ready[pos];
26028 for (i=pos; i<*pn_ready-1; i++)
26029 ready[i] = ready[i + 1];
26030 ready[*pn_ready-1] = tmp;
26032 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
26033 INSN_PRIORITY (tmp)++;
26035 first_store_pos = -1;
26037 break;
26040 pos--;
26043 if (first_store_pos >= 0)
26045 /* An adjacent store wasn't found, but a non-adjacent store was,
26046 so move the non-adjacent store to the front of the ready
26047 list, and adjust its priority so that it is more likely to
26048 stay there. */
26049 tmp = ready[first_store_pos];
26050 for (i=first_store_pos; i<*pn_ready-1; i++)
26051 ready[i] = ready[i + 1];
26052 ready[*pn_ready-1] = tmp;
26053 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
26054 INSN_PRIORITY (tmp)++;
26057 else if (load_store_pendulum == 2)
26059 /* Two loads have been issued in this cycle. Increase the priority
26060 of the first store in the ready list to favor it for issuing in
26061 the next cycle. */
26062 pos = *pn_ready-1;
26064 while (pos >= 0)
26066 if (is_store_insn (ready[pos], &str_mem)
26067 && !sel_sched_p ()
26068 && INSN_PRIORITY_KNOWN (ready[pos]))
26070 INSN_PRIORITY (ready[pos])++;
26072 /* Adjust the pendulum to account for the fact that a store
26073 was found and increased in priority. This is to prevent
26074 increasing the priority of multiple stores */
26075 load_store_pendulum++;
26077 break;
26079 pos--;
26084 /* Do Power9 dependent reordering if necessary. */
26085 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
26086 && recog_memoized (last_scheduled_insn) >= 0)
26087 return power9_sched_reorder2 (ready, *pn_ready - 1);
26089 return cached_can_issue_more;
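/* Illustrative sketch (not part of this file): the Power6 load/store
   pendulum as a standalone state machine.  Negative values count stores
   issued in the current cycle, positive values count loads; the reorder
   hook above then steers the ready list back toward zero.  Hypothetical
   names throughout.  */
#if 0
static int
toy_pendulum_step (int pendulum, int issued_load_p, int issued_store_p)
{
  if (issued_store_p)
    pendulum--;                 /* Swing left: a store was issued.  */
  else if (issued_load_p)
    pendulum++;                 /* Swing right: a load was issued.  */

  /* pendulum ==  1: look for a second load to pair with the first.
     pendulum == -1: look for a second (preferably adjacent) store.
     pendulum ==  2: boost the first store on the ready list.
     pendulum == -2: boost the first load on the ready list.  */
  return pendulum;
}
#endif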
26092 /* Return whether the presence of INSN causes a dispatch group termination
26093 of group WHICH_GROUP.
26095 If WHICH_GROUP == current_group, this function will return true if INSN
26096 causes the termination of the current group (i.e, the dispatch group to
26097 which INSN belongs). This means that INSN will be the last insn in the
26098 group it belongs to.
26100 If WHICH_GROUP == previous_group, this function will return true if INSN
26101 causes the termination of the previous group (i.e, the dispatch group that
26102 precedes the group to which INSN belongs). This means that INSN will be
26103 the first insn in the group it belongs to. */
26105 static bool
26106 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
26108 bool first, last;
26110 if (! insn)
26111 return false;
26113 first = insn_must_be_first_in_group (insn);
26114 last = insn_must_be_last_in_group (insn);
26116 if (first && last)
26117 return true;
26119 if (which_group == current_group)
26120 return last;
26121 else if (which_group == previous_group)
26122 return first;
26124 return false;
26128 static bool
26129 insn_must_be_first_in_group (rtx_insn *insn)
26131 enum attr_type type;
26133 if (!insn
26134 || NOTE_P (insn)
26135 || DEBUG_INSN_P (insn)
26136 || GET_CODE (PATTERN (insn)) == USE
26137 || GET_CODE (PATTERN (insn)) == CLOBBER)
26138 return false;
26140 switch (rs6000_tune)
26142 case PROCESSOR_POWER5:
26143 if (is_cracked_insn (insn))
26144 return true;
26145 /* FALLTHRU */
26146 case PROCESSOR_POWER4:
26147 if (is_microcoded_insn (insn))
26148 return true;
26150 if (!rs6000_sched_groups)
26151 return false;
26153 type = get_attr_type (insn);
26155 switch (type)
26157 case TYPE_MFCR:
26158 case TYPE_MFCRF:
26159 case TYPE_MTCR:
26160 case TYPE_CR_LOGICAL:
26161 case TYPE_MTJMPR:
26162 case TYPE_MFJMPR:
26163 case TYPE_DIV:
26164 case TYPE_LOAD_L:
26165 case TYPE_STORE_C:
26166 case TYPE_ISYNC:
26167 case TYPE_SYNC:
26168 return true;
26169 default:
26170 break;
26172 break;
26173 case PROCESSOR_POWER6:
26174 type = get_attr_type (insn);
26176 switch (type)
26178 case TYPE_EXTS:
26179 case TYPE_CNTLZ:
26180 case TYPE_TRAP:
26181 case TYPE_MUL:
26182 case TYPE_INSERT:
26183 case TYPE_FPCOMPARE:
26184 case TYPE_MFCR:
26185 case TYPE_MTCR:
26186 case TYPE_MFJMPR:
26187 case TYPE_MTJMPR:
26188 case TYPE_ISYNC:
26189 case TYPE_SYNC:
26190 case TYPE_LOAD_L:
26191 case TYPE_STORE_C:
26192 return true;
26193 case TYPE_SHIFT:
26194 if (get_attr_dot (insn) == DOT_NO
26195 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
26196 return true;
26197 else
26198 break;
26199 case TYPE_DIV:
26200 if (get_attr_size (insn) == SIZE_32)
26201 return true;
26202 else
26203 break;
26204 case TYPE_LOAD:
26205 case TYPE_STORE:
26206 case TYPE_FPLOAD:
26207 case TYPE_FPSTORE:
26208 if (get_attr_update (insn) == UPDATE_YES)
26209 return true;
26210 else
26211 break;
26212 default:
26213 break;
26215 break;
26216 case PROCESSOR_POWER7:
26217 type = get_attr_type (insn);
26219 switch (type)
26221 case TYPE_CR_LOGICAL:
26222 case TYPE_MFCR:
26223 case TYPE_MFCRF:
26224 case TYPE_MTCR:
26225 case TYPE_DIV:
26226 case TYPE_ISYNC:
26227 case TYPE_LOAD_L:
26228 case TYPE_STORE_C:
26229 case TYPE_MFJMPR:
26230 case TYPE_MTJMPR:
26231 return true;
26232 case TYPE_MUL:
26233 case TYPE_SHIFT:
26234 case TYPE_EXTS:
26235 if (get_attr_dot (insn) == DOT_YES)
26236 return true;
26237 else
26238 break;
26239 case TYPE_LOAD:
26240 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26241 || get_attr_update (insn) == UPDATE_YES)
26242 return true;
26243 else
26244 break;
26245 case TYPE_STORE:
26246 case TYPE_FPLOAD:
26247 case TYPE_FPSTORE:
26248 if (get_attr_update (insn) == UPDATE_YES)
26249 return true;
26250 else
26251 break;
26252 default:
26253 break;
26255 break;
26256 case PROCESSOR_POWER8:
26257 type = get_attr_type (insn);
26259 switch (type)
26261 case TYPE_CR_LOGICAL:
26262 case TYPE_MFCR:
26263 case TYPE_MFCRF:
26264 case TYPE_MTCR:
26265 case TYPE_SYNC:
26266 case TYPE_ISYNC:
26267 case TYPE_LOAD_L:
26268 case TYPE_STORE_C:
26269 case TYPE_VECSTORE:
26270 case TYPE_MFJMPR:
26271 case TYPE_MTJMPR:
26272 return true;
26273 case TYPE_SHIFT:
26274 case TYPE_EXTS:
26275 case TYPE_MUL:
26276 if (get_attr_dot (insn) == DOT_YES)
26277 return true;
26278 else
26279 break;
26280 case TYPE_LOAD:
26281 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26282 || get_attr_update (insn) == UPDATE_YES)
26283 return true;
26284 else
26285 break;
26286 case TYPE_STORE:
26287 if (get_attr_update (insn) == UPDATE_YES
26288 && get_attr_indexed (insn) == INDEXED_YES)
26289 return true;
26290 else
26291 break;
26292 default:
26293 break;
26295 break;
26296 default:
26297 break;
26300 return false;
26303 static bool
26304 insn_must_be_last_in_group (rtx_insn *insn)
26306 enum attr_type type;
26308 if (!insn
26309 || NOTE_P (insn)
26310 || DEBUG_INSN_P (insn)
26311 || GET_CODE (PATTERN (insn)) == USE
26312 || GET_CODE (PATTERN (insn)) == CLOBBER)
26313 return false;
26315 switch (rs6000_tune) {
26316 case PROCESSOR_POWER4:
26317 case PROCESSOR_POWER5:
26318 if (is_microcoded_insn (insn))
26319 return true;
26321 if (is_branch_slot_insn (insn))
26322 return true;
26324 break;
26325 case PROCESSOR_POWER6:
26326 type = get_attr_type (insn);
26328 switch (type)
26330 case TYPE_EXTS:
26331 case TYPE_CNTLZ:
26332 case TYPE_TRAP:
26333 case TYPE_MUL:
26334 case TYPE_FPCOMPARE:
26335 case TYPE_MFCR:
26336 case TYPE_MTCR:
26337 case TYPE_MFJMPR:
26338 case TYPE_MTJMPR:
26339 case TYPE_ISYNC:
26340 case TYPE_SYNC:
26341 case TYPE_LOAD_L:
26342 case TYPE_STORE_C:
26343 return true;
26344 case TYPE_SHIFT:
26345 if (get_attr_dot (insn) == DOT_NO
26346 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
26347 return true;
26348 else
26349 break;
26350 case TYPE_DIV:
26351 if (get_attr_size (insn) == SIZE_32)
26352 return true;
26353 else
26354 break;
26355 default:
26356 break;
26358 break;
26359 case PROCESSOR_POWER7:
26360 type = get_attr_type (insn);
26362 switch (type)
26364 case TYPE_ISYNC:
26365 case TYPE_SYNC:
26366 case TYPE_LOAD_L:
26367 case TYPE_STORE_C:
26368 return true;
26369 case TYPE_LOAD:
26370 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26371 && get_attr_update (insn) == UPDATE_YES)
26372 return true;
26373 else
26374 break;
26375 case TYPE_STORE:
26376 if (get_attr_update (insn) == UPDATE_YES
26377 && get_attr_indexed (insn) == INDEXED_YES)
26378 return true;
26379 else
26380 break;
26381 default:
26382 break;
26384 break;
26385 case PROCESSOR_POWER8:
26386 type = get_attr_type (insn);
26388 switch (type)
26390 case TYPE_MFCR:
26391 case TYPE_MTCR:
26392 case TYPE_ISYNC:
26393 case TYPE_SYNC:
26394 case TYPE_LOAD_L:
26395 case TYPE_STORE_C:
26396 return true;
26397 case TYPE_LOAD:
26398 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26399 && get_attr_update (insn) == UPDATE_YES)
26400 return true;
26401 else
26402 break;
26403 case TYPE_STORE:
26404 if (get_attr_update (insn) == UPDATE_YES
26405 && get_attr_indexed (insn) == INDEXED_YES)
26406 return true;
26407 else
26408 break;
26409 default:
26410 break;
26412 break;
26413 default:
26414 break;
26417 return false;
26420 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
26421 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
26423 static bool
26424 is_costly_group (rtx *group_insns, rtx next_insn)
26426 int i;
26427 int issue_rate = rs6000_issue_rate ();
26429 for (i = 0; i < issue_rate; i++)
26431 sd_iterator_def sd_it;
26432 dep_t dep;
26433 rtx insn = group_insns[i];
26435 if (!insn)
26436 continue;
26438 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
26440 rtx next = DEP_CON (dep);
26442 if (next == next_insn
26443 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
26444 return true;
26448 return false;
26451 /* Utility of the function redefine_groups.
26452 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
26453 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
26454 to keep it "far" (in a separate group) from GROUP_INSNS, following
26455 one of the following schemes, depending on the value of the flag
26456 -minsert-sched-nops=X:
26457 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
26458 in order to force NEXT_INSN into a separate group.
26459 (2) X < sched_finish_regroup_exact: insert exactly X nops.
26460 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
26461 insertion (has a group just ended, how many vacant issue slots remain in the
26462 last group, and how many dispatch groups were encountered so far). */
26464 static int
26465 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
26466 rtx_insn *next_insn, bool *group_end, int can_issue_more,
26467 int *group_count)
26469 rtx nop;
26470 bool force;
26471 int issue_rate = rs6000_issue_rate ();
26472 bool end = *group_end;
26473 int i;
26475 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
26476 return can_issue_more;
26478 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
26479 return can_issue_more;
26481 force = is_costly_group (group_insns, next_insn);
26482 if (!force)
26483 return can_issue_more;
26485 if (sched_verbose > 6)
26486 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
26487 *group_count ,can_issue_more);
26489 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
26491 if (*group_end)
26492 can_issue_more = 0;
26494 /* Since only a branch can be issued in the last issue_slot, it is
26495 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
26496 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
26497 in this case the last nop will start a new group and the branch
26498 will be forced to the new group. */
26499 if (can_issue_more && !is_branch_slot_insn (next_insn))
26500 can_issue_more--;
26502 /* Do we have a special group ending nop? */
26503 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
26504 || rs6000_tune == PROCESSOR_POWER8)
26506 nop = gen_group_ending_nop ();
26507 emit_insn_before (nop, next_insn);
26508 can_issue_more = 0;
26510 else
26511 while (can_issue_more > 0)
26513 nop = gen_nop ();
26514 emit_insn_before (nop, next_insn);
26515 can_issue_more--;
26518 *group_end = true;
26519 return 0;
26522 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
26524 int n_nops = rs6000_sched_insert_nops;
26526 /* Nops can't be issued from the branch slot, so the effective
26527 issue_rate for nops is 'issue_rate - 1'. */
26528 if (can_issue_more == 0)
26529 can_issue_more = issue_rate;
26530 can_issue_more--;
26531 if (can_issue_more == 0)
26533 can_issue_more = issue_rate - 1;
26534 (*group_count)++;
26535 end = true;
26536 for (i = 0; i < issue_rate; i++)
26538 group_insns[i] = 0;
26542 while (n_nops > 0)
26544 nop = gen_nop ();
26545 emit_insn_before (nop, next_insn);
26546 if (can_issue_more == issue_rate - 1) /* new group begins */
26547 end = false;
26548 can_issue_more--;
26549 if (can_issue_more == 0)
26551 can_issue_more = issue_rate - 1;
26552 (*group_count)++;
26553 end = true;
26554 for (i = 0; i < issue_rate; i++)
26556 group_insns[i] = 0;
26559 n_nops--;
26562 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
26563 can_issue_more++;
26565 /* Is next_insn going to start a new group? */
26566 *group_end
26567 = (end
26568 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26569 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26570 || (can_issue_more < issue_rate &&
26571 insn_terminates_group_p (next_insn, previous_group)));
26572 if (*group_end && end)
26573 (*group_count)--;
26575 if (sched_verbose > 6)
26576 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
26577 *group_count, can_issue_more);
26578 return can_issue_more;
26581 return can_issue_more;
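/* Illustrative sketch (not part of this file): the slot bookkeeping used
   above when a fixed number of nops is inserted.  Nops never occupy the
   branch slot, so they issue at an effective rate of issue_rate - 1; each
   time the count wraps, a new dispatch group begins.  Hypothetical
   helper.  */
#if 0
static int
toy_issue_nops (int n_nops, int can_issue_more, int issue_rate,
                int *group_count)
{
  while (n_nops-- > 0)
    {
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
        }
    }
  return can_issue_more;
}
#endif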
26584 /* This function tries to synchronize the dispatch groups that the compiler "sees"
26585 with the dispatch groups that the processor dispatcher is expected to
26586 form in practice. It tries to achieve this synchronization by forcing the
26587 estimated processor grouping on the compiler (as opposed to the function
26588 'pad_groups' which tries to force the scheduler's grouping on the processor).
26590 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
26591 examines the (estimated) dispatch groups that will be formed by the processor
26592 dispatcher. It marks these group boundaries to reflect the estimated
26593 processor grouping, overriding the grouping that the scheduler had marked.
26594 Depending on the value of the flag '-minsert-sched-nops' this function can
26595 force certain insns into separate groups or force a certain distance between
26596 them by inserting nops, for example, if there exists a "costly dependence"
26597 between the insns.
26599 The function estimates the group boundaries that the processor will form as
26600 follows: It keeps track of how many vacant issue slots are available after
26601 each insn. A subsequent insn will start a new group if one of the following
26602 4 cases applies:
26603 - no more vacant issue slots remain in the current dispatch group.
26604 - only the last issue slot, which is the branch slot, is vacant, but the next
26605 insn is not a branch.
26606 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
26607 which means that a cracked insn (which occupies two issue slots) can't be
26608 issued in this group.
26609 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
26610 start a new group. */
26612 static int
26613 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
26614 rtx_insn *tail)
26616 rtx_insn *insn, *next_insn;
26617 int issue_rate;
26618 int can_issue_more;
26619 int slot, i;
26620 bool group_end;
26621 int group_count = 0;
26622 rtx *group_insns;
26624 /* Initialize. */
26625 issue_rate = rs6000_issue_rate ();
26626 group_insns = XALLOCAVEC (rtx, issue_rate);
26627 for (i = 0; i < issue_rate; i++)
26629 group_insns[i] = 0;
26631 can_issue_more = issue_rate;
26632 slot = 0;
26633 insn = get_next_active_insn (prev_head_insn, tail);
26634 group_end = false;
26636 while (insn != NULL_RTX)
26638 slot = (issue_rate - can_issue_more);
26639 group_insns[slot] = insn;
26640 can_issue_more =
26641 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26642 if (insn_terminates_group_p (insn, current_group))
26643 can_issue_more = 0;
26645 next_insn = get_next_active_insn (insn, tail);
26646 if (next_insn == NULL_RTX)
26647 return group_count + 1;
26649 /* Is next_insn going to start a new group? */
26650 group_end
26651 = (can_issue_more == 0
26652 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26653 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26654 || (can_issue_more < issue_rate &&
26655 insn_terminates_group_p (next_insn, previous_group)));
26657 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
26658 next_insn, &group_end, can_issue_more,
26659 &group_count);
26661 if (group_end)
26663 group_count++;
26664 can_issue_more = 0;
26665 for (i = 0; i < issue_rate; i++)
26667 group_insns[i] = 0;
26671 if (GET_MODE (next_insn) == TImode && can_issue_more)
26672 PUT_MODE (next_insn, VOIDmode);
26673 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
26674 PUT_MODE (next_insn, TImode);
26676 insn = next_insn;
26677 if (can_issue_more == 0)
26678 can_issue_more = issue_rate;
26679 } /* while */
26681 return group_count;
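/* Illustrative sketch (not part of this file): the four "next insn starts
   a new group" conditions described above redefine_groups, expressed as a
   standalone predicate over plain ints.  Hypothetical names.  */
#if 0
static int
toy_group_end_p (int can_issue_more, int issue_rate,
                 int next_is_branch_p, int next_is_cracked_p,
                 int next_must_be_first_p)
{
  return (can_issue_more == 0
          || (can_issue_more == 1 && !next_is_branch_p)
          || (can_issue_more <= 2 && next_is_cracked_p)
          || (can_issue_more < issue_rate && next_must_be_first_p));
}
#endif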
26684 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
26685 dispatch group boundaries that the scheduler had marked. Pad with nops
26686 any dispatch groups which have vacant issue slots, in order to force the
26687 scheduler's grouping on the processor dispatcher. The function
26688 returns the number of dispatch groups found. */
26690 static int
26691 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
26692 rtx_insn *tail)
26694 rtx_insn *insn, *next_insn;
26695 rtx nop;
26696 int issue_rate;
26697 int can_issue_more;
26698 int group_end;
26699 int group_count = 0;
26701 /* Initialize issue_rate. */
26702 issue_rate = rs6000_issue_rate ();
26703 can_issue_more = issue_rate;
26705 insn = get_next_active_insn (prev_head_insn, tail);
26706 next_insn = get_next_active_insn (insn, tail);
26708 while (insn != NULL_RTX)
26710 can_issue_more =
26711 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26713 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
26715 if (next_insn == NULL_RTX)
26716 break;
26718 if (group_end)
26720 /* If the scheduler had marked group termination at this location
26721 (between insn and next_insn), and neither insn nor next_insn will
26722 force group termination, pad the group with nops to force group
26723 termination. */
26724 if (can_issue_more
26725 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
26726 && !insn_terminates_group_p (insn, current_group)
26727 && !insn_terminates_group_p (next_insn, previous_group))
26729 if (!is_branch_slot_insn (next_insn))
26730 can_issue_more--;
26732 while (can_issue_more)
26734 nop = gen_nop ();
26735 emit_insn_before (nop, next_insn);
26736 can_issue_more--;
26740 can_issue_more = issue_rate;
26741 group_count++;
26744 insn = next_insn;
26745 next_insn = get_next_active_insn (insn, tail);
26748 return group_count;
26751 /* We're beginning a new block. Initialize data structures as necessary. */
26753 static void
26754 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
26755 int sched_verbose ATTRIBUTE_UNUSED,
26756 int max_ready ATTRIBUTE_UNUSED)
26758 last_scheduled_insn = NULL;
26759 load_store_pendulum = 0;
26760 divide_cnt = 0;
26761 vec_pairing = 0;
26764 /* The following function is called at the end of scheduling BB.
26765 After reload, it inserts nops to enforce insn group bundling. */
26767 static void
26768 rs6000_sched_finish (FILE *dump, int sched_verbose)
26770 int n_groups;
26772 if (sched_verbose)
26773 fprintf (dump, "=== Finishing schedule.\n");
26775 if (reload_completed && rs6000_sched_groups)
26777 /* Do not run sched_finish hook when selective scheduling enabled. */
26778 if (sel_sched_p ())
26779 return;
26781 if (rs6000_sched_insert_nops == sched_finish_none)
26782 return;
26784 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
26785 n_groups = pad_groups (dump, sched_verbose,
26786 current_sched_info->prev_head,
26787 current_sched_info->next_tail);
26788 else
26789 n_groups = redefine_groups (dump, sched_verbose,
26790 current_sched_info->prev_head,
26791 current_sched_info->next_tail);
26793 if (sched_verbose >= 6)
26795 fprintf (dump, "ngroups = %d\n", n_groups);
26796 print_rtl (dump, current_sched_info->prev_head);
26797 fprintf (dump, "Done finish_sched\n");
26802 struct rs6000_sched_context
26804 short cached_can_issue_more;
26805 rtx_insn *last_scheduled_insn;
26806 int load_store_pendulum;
26807 int divide_cnt;
26808 int vec_pairing;
26811 typedef struct rs6000_sched_context rs6000_sched_context_def;
26812 typedef rs6000_sched_context_def *rs6000_sched_context_t;
26814 /* Allocate store for new scheduling context. */
26815 static void *
26816 rs6000_alloc_sched_context (void)
26818 return xmalloc (sizeof (rs6000_sched_context_def));
26821 /* If CLEAN_P is true, initialize _SC with clean data;
26822 otherwise, initialize it from the global context. */
26823 static void
26824 rs6000_init_sched_context (void *_sc, bool clean_p)
26826 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26828 if (clean_p)
26830 sc->cached_can_issue_more = 0;
26831 sc->last_scheduled_insn = NULL;
26832 sc->load_store_pendulum = 0;
26833 sc->divide_cnt = 0;
26834 sc->vec_pairing = 0;
26836 else
26838 sc->cached_can_issue_more = cached_can_issue_more;
26839 sc->last_scheduled_insn = last_scheduled_insn;
26840 sc->load_store_pendulum = load_store_pendulum;
26841 sc->divide_cnt = divide_cnt;
26842 sc->vec_pairing = vec_pairing;
26846 /* Sets the global scheduling context to the one pointed to by _SC. */
26847 static void
26848 rs6000_set_sched_context (void *_sc)
26850 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26852 gcc_assert (sc != NULL);
26854 cached_can_issue_more = sc->cached_can_issue_more;
26855 last_scheduled_insn = sc->last_scheduled_insn;
26856 load_store_pendulum = sc->load_store_pendulum;
26857 divide_cnt = sc->divide_cnt;
26858 vec_pairing = sc->vec_pairing;
26861 /* Free _SC. */
26862 static void
26863 rs6000_free_sched_context (void *_sc)
26865 gcc_assert (_sc != NULL);
26867 free (_sc);
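/* Illustrative sketch (not part of this file): how a selective-scheduling
   client is expected to drive the four context hooks above.  The hook
   names are the real ones defined in this file; the driver itself is
   hypothetical.  */
#if 0
static void
toy_checkpoint_and_restore (void)
{
  void *ctx = rs6000_alloc_sched_context ();

  rs6000_init_sched_context (ctx, false);   /* Snapshot global state.  */
  /* ... schedule speculatively, mutating the globals ...  */
  rs6000_set_sched_context (ctx);           /* Roll the globals back.  */
  rs6000_free_sched_context (ctx);
}
#endif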
26870 static bool
26871 rs6000_sched_can_speculate_insn (rtx_insn *insn)
26873 switch (get_attr_type (insn))
26875 case TYPE_DIV:
26876 case TYPE_SDIV:
26877 case TYPE_DDIV:
26878 case TYPE_VECDIV:
26879 case TYPE_SSQRT:
26880 case TYPE_DSQRT:
26881 return false;
26883 default:
26884 return true;
26888 /* Length in bytes of the trampoline for entering a nested function. */
26891 rs6000_trampoline_size (void)
26893 int ret = 0;
26895 switch (DEFAULT_ABI)
26897 default:
26898 gcc_unreachable ();
26900 case ABI_AIX:
26901 ret = (TARGET_32BIT) ? 12 : 24;
26902 break;
26904 case ABI_ELFv2:
26905 gcc_assert (!TARGET_32BIT);
26906 ret = 32;
26907 break;
26909 case ABI_DARWIN:
26910 case ABI_V4:
26911 ret = (TARGET_32BIT) ? 40 : 48;
26912 break;
26915 return ret;
26918 /* Emit RTL insns to initialize the variable parts of a trampoline.
26919 FNADDR is an RTX for the address of the function's pure code.
26920 CXT is an RTX for the static chain value for the function. */
26922 static void
26923 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
26925 int regsize = (TARGET_32BIT) ? 4 : 8;
26926 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
26927 rtx ctx_reg = force_reg (Pmode, cxt);
26928 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
26930 switch (DEFAULT_ABI)
26932 default:
26933 gcc_unreachable ();
26935 /* Under AIX, just build the 3-word function descriptor. */
26936 case ABI_AIX:
26938 rtx fnmem, fn_reg, toc_reg;
26940 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
26941 error ("you cannot take the address of a nested function if you use "
26942 "the %qs option", "-mno-pointers-to-nested-functions");
26944 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
26945 fn_reg = gen_reg_rtx (Pmode);
26946 toc_reg = gen_reg_rtx (Pmode);
26948 /* Macro to shorten the code expansions below. */
26949 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
26951 m_tramp = replace_equiv_address (m_tramp, addr);
26953 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
26954 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
26955 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
26956 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
26957 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
26959 # undef MEM_PLUS
26961 break;
26963 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
26964 case ABI_ELFv2:
26965 case ABI_DARWIN:
26966 case ABI_V4:
26967 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
26968 LCT_NORMAL, VOIDmode,
26969 addr, Pmode,
26970 GEN_INT (rs6000_trampoline_size ()), SImode,
26971 fnaddr, Pmode,
26972 ctx_reg, Pmode);
26973 break;
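/* Illustrative sketch (not part of this file): the 3-word AIX function
   descriptor that the ABI_AIX branch above copies into the trampoline,
   written as a plain struct.  Each word is 4 bytes for -m32 and 8 bytes
   for -m64; the struct itself is hypothetical.  */
#if 0
struct toy_aix_descriptor
{
  void *code_addr;      /* Word 0: address of the function's code.  */
  void *toc_value;      /* Word 1: the function's TOC pointer.  */
  void *static_chain;   /* Word 2: overwritten with CXT above.  */
};
#endif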
26978 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
26979 identifier as an argument, so the front end shouldn't look it up. */
26981 static bool
26982 rs6000_attribute_takes_identifier_p (const_tree attr_id)
26984 return is_attribute_p ("altivec", attr_id);
26987 /* Handle the "altivec" attribute. The attribute may have
26988 arguments as follows:
26990 __attribute__((altivec(vector__)))
26991 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
26992 __attribute__((altivec(bool__))) (always followed by 'unsigned')
26994 and may appear more than once (e.g., 'vector bool char') in a
26995 given declaration. */
26997 static tree
26998 rs6000_handle_altivec_attribute (tree *node,
26999 tree name ATTRIBUTE_UNUSED,
27000 tree args,
27001 int flags ATTRIBUTE_UNUSED,
27002 bool *no_add_attrs)
27004 tree type = *node, result = NULL_TREE;
27005 machine_mode mode;
27006 int unsigned_p;
27007 char altivec_type
27008 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
27009 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
27010 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
27011 : '?');
27013 while (POINTER_TYPE_P (type)
27014 || TREE_CODE (type) == FUNCTION_TYPE
27015 || TREE_CODE (type) == METHOD_TYPE
27016 || TREE_CODE (type) == ARRAY_TYPE)
27017 type = TREE_TYPE (type);
27019 mode = TYPE_MODE (type);
27021 /* Check for invalid AltiVec type qualifiers. */
27022 if (type == long_double_type_node)
27023 error ("use of %<long double%> in AltiVec types is invalid");
27024 else if (type == boolean_type_node)
27025 error ("use of boolean types in AltiVec types is invalid");
27026 else if (TREE_CODE (type) == COMPLEX_TYPE)
27027 error ("use of %<complex%> in AltiVec types is invalid");
27028 else if (DECIMAL_FLOAT_MODE_P (mode))
27029 error ("use of decimal floating point types in AltiVec types is invalid");
27030 else if (!TARGET_VSX)
27032 if (type == long_unsigned_type_node || type == long_integer_type_node)
27034 if (TARGET_64BIT)
27035 error ("use of %<long%> in AltiVec types is invalid for "
27036 "64-bit code without %qs", "-mvsx");
27037 else if (rs6000_warn_altivec_long)
27038 warning (0, "use of %<long%> in AltiVec types is deprecated; "
27039 "use %<int%>");
27041 else if (type == long_long_unsigned_type_node
27042 || type == long_long_integer_type_node)
27043 error ("use of %<long long%> in AltiVec types is invalid without %qs",
27044 "-mvsx");
27045 else if (type == double_type_node)
27046 error ("use of %<double%> in AltiVec types is invalid without %qs",
27047 "-mvsx");
27050 switch (altivec_type)
27052 case 'v':
27053 unsigned_p = TYPE_UNSIGNED (type);
27054 switch (mode)
27056 case E_TImode:
27057 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
27058 break;
27059 case E_DImode:
27060 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
27061 break;
27062 case E_SImode:
27063 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
27064 break;
27065 case E_HImode:
27066 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
27067 break;
27068 case E_QImode:
27069 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
27070 break;
27071 case E_SFmode: result = V4SF_type_node; break;
27072 case E_DFmode: result = V2DF_type_node; break;
27073 /* If the user says 'vector int bool', we may be handed the 'bool'
27074 attribute _before_ the 'vector' attribute, and so select the
27075 proper type in the 'b' case below. */
27076 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
27077 case E_V2DImode: case E_V2DFmode:
27078 result = type;
27079 default: break;
27081 break;
27082 case 'b':
27083 switch (mode)
27085 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
27086 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
27087 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
27088 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
27089 default: break;
27091 break;
27092 case 'p':
27093 switch (mode)
27095 case E_V8HImode: result = pixel_V8HI_type_node;
27096 default: break;
27098 default: break;
27101 /* Propagate qualifiers attached to the element type
27102 onto the vector type. */
27103 if (result && result != type && TYPE_QUALS (type))
27104 result = build_qualified_type (result, TYPE_QUALS (type));
27106 *no_add_attrs = true; /* No need to hang on to the attribute. */
27108 if (result)
27109 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
27111 return NULL_TREE;
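/* Illustrative example (an assumption, not from this file): the internal
   attribute spellings the handler above accepts.  The AltiVec keywords
   are expected to expand to these forms, making each pair equivalent.  */
#if 0
typedef int v4si __attribute__ ((altivec (vector__)));          /* vector int */
typedef unsigned short vp __attribute__ ((altivec (pixel__)));  /* vector pixel */
typedef unsigned int vbi __attribute__ ((altivec (bool__)));    /* vector bool int */
#endif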
27114 /* AltiVec defines five built-in scalar types that serve as vector
27115 elements; we must teach the compiler how to mangle them. The 128-bit
27116 floating point mangling is target-specific as well. */
27118 static const char *
27119 rs6000_mangle_type (const_tree type)
27121 type = TYPE_MAIN_VARIANT (type);
27123 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
27124 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
27125 return NULL;
27127 if (type == bool_char_type_node) return "U6__boolc";
27128 if (type == bool_short_type_node) return "U6__bools";
27129 if (type == pixel_type_node) return "u7__pixel";
27130 if (type == bool_int_type_node) return "U6__booli";
27131 if (type == bool_long_long_type_node) return "U6__boolx";
27133 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
27134 return "g";
27135 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
27136 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
27138 /* For all other types, use the default mangling. */
27139 return NULL;
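/* Reference sketch (not part of this file): the mangling table implemented
   above, collected into one place.  The data is reproduced from the code;
   the array itself is a hypothetical convenience.  */
#if 0
static const struct { const char *type; const char *mangled; }
toy_rs6000_manglings[] =
{
  { "__bool char",        "U6__boolc" },
  { "__bool short",       "U6__bools" },
  { "__pixel",            "u7__pixel" },
  { "__bool int",         "U6__booli" },
  { "__bool long long",   "U6__boolx" },
  { "IBM 128-bit float",  "g" },
  { "IEEE 128-bit float", "u9__ieee128" }, /* U10__float128 for GCC 8.1 compat */
};
#endif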
27142 /* Handle a "longcall" or "shortcall" attribute; arguments as in
27143 struct attribute_spec.handler. */
27145 static tree
27146 rs6000_handle_longcall_attribute (tree *node, tree name,
27147 tree args ATTRIBUTE_UNUSED,
27148 int flags ATTRIBUTE_UNUSED,
27149 bool *no_add_attrs)
27151 if (TREE_CODE (*node) != FUNCTION_TYPE
27152 && TREE_CODE (*node) != FIELD_DECL
27153 && TREE_CODE (*node) != TYPE_DECL)
27155 warning (OPT_Wattributes, "%qE attribute only applies to functions",
27156 name);
27157 *no_add_attrs = true;
27160 return NULL_TREE;
27163 /* Set longcall attributes on all functions declared when
27164 rs6000_default_long_calls is true. */
27165 static void
27166 rs6000_set_default_type_attributes (tree type)
27168 if (rs6000_default_long_calls
27169 && (TREE_CODE (type) == FUNCTION_TYPE
27170 || TREE_CODE (type) == METHOD_TYPE))
27171 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
27172 NULL_TREE,
27173 TYPE_ATTRIBUTES (type));
27175 #if TARGET_MACHO
27176 darwin_set_default_type_attributes (type);
27177 #endif
27180 /* Return a reference suitable for calling a function with the
27181 longcall attribute. */
27183 static rtx
27184 rs6000_longcall_ref (rtx call_ref, rtx arg)
27186 /* System V adds '.' to the internal name, so skip any leading periods. */
27187 const char *call_name = XSTR (call_ref, 0);
27188 if (*call_name == '.')
27190 while (*call_name == '.')
27191 call_name++;
27193 tree node = get_identifier (call_name);
27194 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
27197 if (TARGET_PLTSEQ)
27199 rtx base = const0_rtx;
27200 int regno = 12;
27201 if (rs6000_pcrel_p (cfun))
27203 rtx reg = gen_rtx_REG (Pmode, regno);
27204 rtx u = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
27205 UNSPEC_PLT_PCREL);
27206 emit_insn (gen_rtx_SET (reg, u));
27207 return reg;
27210 if (DEFAULT_ABI == ABI_ELFv2)
27211 base = gen_rtx_REG (Pmode, TOC_REGISTER);
27212 else
27214 if (flag_pic)
27215 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
27216 regno = 11;
27218 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
27219 may be used by a function global entry point. For SysV4, r11
27220 is used by __glink_PLTresolve lazy resolver entry. */
27221 rtx reg = gen_rtx_REG (Pmode, regno);
27222 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
27223 UNSPEC_PLT16_HA);
27224 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
27225 UNSPEC_PLT16_LO);
27226 emit_insn (gen_rtx_SET (reg, hi));
27227 emit_insn (gen_rtx_SET (reg, lo));
27228 return reg;
27231 return force_reg (Pmode, call_ref);
27234 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
27235 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
27236 #endif
27238 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
27239 struct attribute_spec.handler. */
27240 static tree
27241 rs6000_handle_struct_attribute (tree *node, tree name,
27242 tree args ATTRIBUTE_UNUSED,
27243 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
27245 tree *type = NULL;
27246 if (DECL_P (*node))
27248 if (TREE_CODE (*node) == TYPE_DECL)
27249 type = &TREE_TYPE (*node);
27251 else
27252 type = node;
27254 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
27255 || TREE_CODE (*type) == UNION_TYPE)))
27257 warning (OPT_Wattributes, "%qE attribute ignored", name);
27258 *no_add_attrs = true;
27261 else if ((is_attribute_p ("ms_struct", name)
27262 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
27263 || ((is_attribute_p ("gcc_struct", name)
27264 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
27266 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
27267 name);
27268 *no_add_attrs = true;
27271 return NULL_TREE;
27274 static bool
27275 rs6000_ms_bitfield_layout_p (const_tree record_type)
27277 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
27278 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
27279 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
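/* Illustrative example (not part of this file): the source-level
   attributes the two handlers above recognize.  */
#if 0
struct __attribute__ ((ms_struct)) toy_ms_layout
{
  char c;
  int bits : 7;         /* Laid out under the MS bitfield rules.  */
};

struct __attribute__ ((gcc_struct)) toy_gcc_layout
{
  char c;
  int bits : 7;         /* Laid out under the native GCC rules.  */
};
#endif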
27282 #ifdef USING_ELFOS_H
27284 /* A get_unnamed_section callback, used for switching to toc_section. */
27286 static void
27287 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
27289 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27290 && TARGET_MINIMAL_TOC)
27292 if (!toc_initialized)
27294 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
27295 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27296 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
27297 fprintf (asm_out_file, "\t.tc ");
27298 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
27299 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27300 fprintf (asm_out_file, "\n");
27302 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27303 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27304 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27305 fprintf (asm_out_file, " = .+32768\n");
27306 toc_initialized = 1;
27308 else
27309 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27311 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27313 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
27314 if (!toc_initialized)
27316 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27317 toc_initialized = 1;
27320 else
27322 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27323 if (!toc_initialized)
27325 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27326 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27327 fprintf (asm_out_file, " = .+32768\n");
27328 toc_initialized = 1;
27333 /* Implement TARGET_ASM_INIT_SECTIONS. */
27335 static void
27336 rs6000_elf_asm_init_sections (void)
27338 toc_section
27339 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
27341 sdata2_section
27342 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
27343 SDATA2_SECTION_ASM_OP);
27346 /* Implement TARGET_SELECT_RTX_SECTION. */
27348 static section *
27349 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
27350 unsigned HOST_WIDE_INT align)
27352 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
27353 return toc_section;
27354 else
27355 return default_elf_select_rtx_section (mode, x, align);
27358 /* For a SYMBOL_REF, set generic flags and then perform some
27359 target-specific processing.
27361 When the AIX ABI is requested on a non-AIX system, replace the
27362 function name with the real name (with a leading .) rather than the
27363 function descriptor name. This saves a lot of overriding code to
27364 read the prefixes. */
27366 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
27367 static void
27368 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
27370 default_encode_section_info (decl, rtl, first);
27372 if (first
27373 && TREE_CODE (decl) == FUNCTION_DECL
27374 && !TARGET_AIX
27375 && DEFAULT_ABI == ABI_AIX)
27377 rtx sym_ref = XEXP (rtl, 0);
27378 size_t len = strlen (XSTR (sym_ref, 0));
27379 char *str = XALLOCAVEC (char, len + 2);
27380 str[0] = '.';
27381 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
27382 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
27386 static inline bool
27387 compare_section_name (const char *section, const char *templ)
27389 int len;
27391 len = strlen (templ);
27392 return (strncmp (section, templ, len) == 0
27393 && (section[len] == 0 || section[len] == '.'));
27396 bool
27397 rs6000_elf_in_small_data_p (const_tree decl)
27399 if (rs6000_sdata == SDATA_NONE)
27400 return false;
27402 /* We want to merge strings, so we never consider them small data. */
27403 if (TREE_CODE (decl) == STRING_CST)
27404 return false;
27406 /* Functions are never in the small data area. */
27407 if (TREE_CODE (decl) == FUNCTION_DECL)
27408 return false;
27410 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
27412 const char *section = DECL_SECTION_NAME (decl);
27413 if (compare_section_name (section, ".sdata")
27414 || compare_section_name (section, ".sdata2")
27415 || compare_section_name (section, ".gnu.linkonce.s")
27416 || compare_section_name (section, ".sbss")
27417 || compare_section_name (section, ".sbss2")
27418 || compare_section_name (section, ".gnu.linkonce.sb")
27419 || strcmp (section, ".PPC.EMB.sdata0") == 0
27420 || strcmp (section, ".PPC.EMB.sbss0") == 0)
27421 return true;
27423 else
27425 /* If we are told not to put readonly data in sdata, then don't. */
27426 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
27427 && !rs6000_readonly_in_sdata)
27428 return false;
27430 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
27432 if (size > 0
27433 && size <= g_switch_value
27434 /* If it's not public, and we're not going to reference it there,
27435 there's no need to put it in the small data section. */
27436 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
27437 return true;
27440 return false;
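/* Illustrative example (not part of this file): declarations the predicate
   above would route to the small data area under -msdata with a suitable
   -G threshold.  All names are hypothetical.  */
#if 0
int toy_counter;                        /* Small if its size <= the -G value.  */
int toy_table[4096];                    /* Too big: stays in regular .data.  */
int toy_forced __attribute__ ((section (".sdata")));  /* Matched by name.  */
#endif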
27443 #endif /* USING_ELFOS_H */
27445 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
27447 static bool
27448 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
27450 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
27453 /* Do not place thread-local symbols refs in the object blocks. */
27455 static bool
27456 rs6000_use_blocks_for_decl_p (const_tree decl)
27458 return !DECL_THREAD_LOCAL_P (decl);
27461 /* Return a REG that occurs in ADDR with coefficient 1.
27462 ADDR can be effectively incremented by incrementing REG.
27464 r0 is special and we must not select it as an address
27465 register by this routine since our caller will try to
27466 increment the returned register via an "la" instruction. */
27469 find_addr_reg (rtx addr)
27471 while (GET_CODE (addr) == PLUS)
27473 if (REG_P (XEXP (addr, 0))
27474 && REGNO (XEXP (addr, 0)) != 0)
27475 addr = XEXP (addr, 0);
27476 else if (REG_P (XEXP (addr, 1))
27477 && REGNO (XEXP (addr, 1)) != 0)
27478 addr = XEXP (addr, 1);
27479 else if (CONSTANT_P (XEXP (addr, 0)))
27480 addr = XEXP (addr, 1);
27481 else if (CONSTANT_P (XEXP (addr, 1)))
27482 addr = XEXP (addr, 0);
27483 else
27484 gcc_unreachable ();
27486 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
27487 return addr;
27490 void
27491 rs6000_fatal_bad_address (rtx op)
27493 fatal_insn ("bad address", op);
27496 #if TARGET_MACHO
27498 vec<branch_island, va_gc> *branch_islands;
27500 /* Remember to generate a branch island for far calls to the given
27501 function. */
27503 static void
27504 add_compiler_branch_island (tree label_name, tree function_name,
27505 int line_number)
27507 branch_island bi = {function_name, label_name, line_number};
27508 vec_safe_push (branch_islands, bi);
27511 /* NO_PREVIOUS_DEF checks whether the function name is already in the
27512 list of branch islands. */
27514 static int
27515 no_previous_def (tree function_name)
27517 branch_island *bi;
27518 unsigned ix;
27520 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
27521 if (function_name == bi->function_name)
27522 return 0;
27523 return 1;
27526 /* GET_PREV_LABEL gets the label name from the previous definition of
27527 the function. */
27529 static tree
27530 get_prev_label (tree function_name)
27532 branch_island *bi;
27533 unsigned ix;
27535 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
27536 if (function_name == bi->function_name)
27537 return bi->label_name;
27538 return NULL_TREE;
27541 /* Generate PIC and indirect symbol stubs. */
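/* For reference, the 32-bit -fPIC (flag_pic == 2) stub emitted below
   has roughly this shape (label spellings depend on the GEN_*_NAME
   macros and are shown here purely as an illustration):

	L_foo$stub:
		.indirect_symbol _foo
		mflr r0
		bcl 20,31,L1$spb
	L1$spb:	mflr r11
		addis r11,r11,ha16(L_foo$lazy_ptr-L1$spb)
		mtlr r0
		lwzu r12,lo16(L_foo$lazy_ptr-L1$spb)(r11)
		mtctr r12
		bctr
	L_foo$lazy_ptr:
		.indirect_symbol _foo
		.long dyld_stub_binding_helper  */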
27543 void
27544 machopic_output_stub (FILE *file, const char *symb, const char *stub)
27546 unsigned int length;
27547 char *symbol_name, *lazy_ptr_name;
27548 char *local_label_0;
27549 static unsigned label = 0;
27551 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
27552 symb = (*targetm.strip_name_encoding) (symb);
27555 length = strlen (symb);
27556 symbol_name = XALLOCAVEC (char, length + 32);
27557 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
27559 lazy_ptr_name = XALLOCAVEC (char, length + 32);
27560 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
27562 if (flag_pic == 2)
27563 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
27564 else
27565 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
27567 if (flag_pic == 2)
27569 fprintf (file, "\t.align 5\n");
27571 fprintf (file, "%s:\n", stub);
27572 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27574 label++;
27575 local_label_0 = XALLOCAVEC (char, 16);
27576 sprintf (local_label_0, "L%u$spb", label);
27578 fprintf (file, "\tmflr r0\n");
27579 if (TARGET_LINK_STACK)
27581 char name[32];
27582 get_ppc476_thunk_name (name);
27583 fprintf (file, "\tbl %s\n", name);
27584 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27586 else
27588 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
27589 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27591 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
27592 lazy_ptr_name, local_label_0);
27593 fprintf (file, "\tmtlr r0\n");
27594 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
27595 (TARGET_64BIT ? "ldu" : "lwzu"),
27596 lazy_ptr_name, local_label_0);
27597 fprintf (file, "\tmtctr r12\n");
27598 fprintf (file, "\tbctr\n");
27600 else
27602 fprintf (file, "\t.align 4\n");
27604 fprintf (file, "%s:\n", stub);
27605 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27607 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
27608 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
27609 (TARGET_64BIT ? "ldu" : "lwzu"),
27610 lazy_ptr_name);
27611 fprintf (file, "\tmtctr r12\n");
27612 fprintf (file, "\tbctr\n");
27615 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
27616 fprintf (file, "%s:\n", lazy_ptr_name);
27617 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27618 fprintf (file, "%sdyld_stub_binding_helper\n",
27619 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
27622 /* Legitimize PIC addresses. If the address is already
27623 position-independent, we return ORIG. Newly generated
27624 position-independent addresses go into a reg. This is REG if
27625 nonzero, otherwise we allocate register(s) as necessary. */
27627 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
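/* SMALL_INT tests whether X fits in a signed 16-bit immediate: the
   0x8000 bias maps the interval [-0x8000, 0x7fff] onto [0, 0xffff],
   so a single unsigned comparison suffices.  */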
27629 rtx
27630 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
27631 rtx reg)
27633 rtx base, offset;
27635 if (reg == NULL && !reload_completed)
27636 reg = gen_reg_rtx (Pmode);
27638 if (GET_CODE (orig) == CONST)
27640 rtx reg_temp;
27642 if (GET_CODE (XEXP (orig, 0)) == PLUS
27643 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
27644 return orig;
27646 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
27648 /* Use a different reg for the intermediate value, as
27649 it will be marked UNCHANGING. */
27650 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
27651 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
27652 Pmode, reg_temp);
27653 offset =
27654 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
27655 Pmode, reg);
27657 if (CONST_INT_P (offset))
27659 if (SMALL_INT (offset))
27660 return plus_constant (Pmode, base, INTVAL (offset));
27661 else if (!reload_completed)
27662 offset = force_reg (Pmode, offset);
27663 else
27665 rtx mem = force_const_mem (Pmode, orig);
27666 return machopic_legitimize_pic_address (mem, Pmode, reg);
27669 return gen_rtx_PLUS (Pmode, base, offset);
27672 /* Fall back on generic machopic code. */
27673 return machopic_legitimize_pic_address (orig, mode, reg);
27676 /* Output a .machine directive for the Darwin assembler, and call
27677 the generic start_file routine. */
27679 static void
27680 rs6000_darwin_file_start (void)
27682 static const struct
27684 const char *arg;
27685 const char *name;
27686 HOST_WIDE_INT if_set;
27687 } mapping[] = {
27688 { "ppc64", "ppc64", MASK_64BIT },
27689 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
27690 { "power4", "ppc970", 0 },
27691 { "G5", "ppc970", 0 },
27692 { "7450", "ppc7450", 0 },
27693 { "7400", "ppc7400", MASK_ALTIVEC },
27694 { "G4", "ppc7400", 0 },
27695 { "750", "ppc750", 0 },
27696 { "740", "ppc750", 0 },
27697 { "G3", "ppc750", 0 },
27698 { "604e", "ppc604e", 0 },
27699 { "604", "ppc604", 0 },
27700 { "603e", "ppc603", 0 },
27701 { "603", "ppc603", 0 },
27702 { "601", "ppc601", 0 },
27703 { NULL, "ppc", 0 } };
27704 const char *cpu_id = "";
27705 size_t i;
27707 rs6000_file_start ();
27708 darwin_file_start ();
27710 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
27712 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
27713 cpu_id = rs6000_default_cpu;
27715 if (global_options_set.x_rs6000_cpu_index)
27716 cpu_id = processor_target_table[rs6000_cpu_index].name;
27718 /* Look through the mapping array. Pick the first name that either
27719 matches the argument, has a bit set in IF_SET that is also set
27720 in the target flags, or has a NULL name. */
27722 i = 0;
27723 while (mapping[i].arg != NULL
27724 && strcmp (mapping[i].arg, cpu_id) != 0
27725 && (mapping[i].if_set & rs6000_isa_flags) == 0)
27726 i++;
27728 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
27731 #endif /* TARGET_MACHO */
27733 #if TARGET_ELF
27734 static int
27735 rs6000_elf_reloc_rw_mask (void)
27737 if (flag_pic)
27738 return 3;
27739 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27740 return 2;
27741 else
27742 return 0;
27745 /* Record an element in the table of global constructors. SYMBOL is
27746 a SYMBOL_REF of the function to be called; PRIORITY is a number
27747 between 0 and MAX_INIT_PRIORITY.
27749 This differs from default_named_section_asm_out_constructor in
27750 that we have special handling for -mrelocatable. */
27752 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
27753 static void
27754 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
27756 const char *section = ".ctors";
27757 char buf[18];
27759 if (priority != DEFAULT_INIT_PRIORITY)
27761 sprintf (buf, ".ctors.%.5u",
27762 /* Invert the numbering so the linker puts us in the proper
27763 order; constructors are run from right to left, and the
27764 linker sorts in increasing order. */
27765 MAX_INIT_PRIORITY - priority);
27766 section = buf;
27769 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27770 assemble_align (POINTER_SIZE);
27772 if (DEFAULT_ABI == ABI_V4
27773 && (TARGET_RELOCATABLE || flag_pic > 1))
27775 fputs ("\t.long (", asm_out_file);
27776 output_addr_const (asm_out_file, symbol);
27777 fputs (")@fixup\n", asm_out_file);
27779 else
27780 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
27783 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
27784 static void
27785 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
27787 const char *section = ".dtors";
27788 char buf[18];
27790 if (priority != DEFAULT_INIT_PRIORITY)
27792 sprintf (buf, ".dtors.%.5u",
27793 /* Invert the numbering so the linker puts us in the proper
27794 order; constructors are run from right to left, and the
27795 linker sorts in increasing order. */
27796 MAX_INIT_PRIORITY - priority);
27797 section = buf;
27800 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27801 assemble_align (POINTER_SIZE);
27803 if (DEFAULT_ABI == ABI_V4
27804 && (TARGET_RELOCATABLE || flag_pic > 1))
27806 fputs ("\t.long (", asm_out_file);
27807 output_addr_const (asm_out_file, symbol);
27808 fputs (")@fixup\n", asm_out_file);
27810 else
27811 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
27814 void
27815 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
27817 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
27819 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
27820 ASM_OUTPUT_LABEL (file, name);
27821 fputs (DOUBLE_INT_ASM_OP, file);
27822 rs6000_output_function_entry (file, name);
27823 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
27824 if (DOT_SYMBOLS)
27826 fputs ("\t.size\t", file);
27827 assemble_name (file, name);
27828 fputs (",24\n\t.type\t.", file);
27829 assemble_name (file, name);
27830 fputs (",@function\n", file);
27831 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
27833 fputs ("\t.globl\t.", file);
27834 assemble_name (file, name);
27835 putc ('\n', file);
27838 else
27839 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27840 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27841 rs6000_output_function_entry (file, name);
27842 fputs (":\n", file);
27843 return;
27846 int uses_toc;
27847 if (DEFAULT_ABI == ABI_V4
27848 && (TARGET_RELOCATABLE || flag_pic > 1)
27849 && !TARGET_SECURE_PLT
27850 && (!constant_pool_empty_p () || crtl->profile)
27851 && (uses_toc = uses_TOC ()))
27853 char buf[256];
27855 if (uses_toc == 2)
27856 switch_to_other_text_partition ();
27857 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27859 fprintf (file, "\t.long ");
27860 assemble_name (file, toc_label_name);
27861 need_toc_init = 1;
27862 putc ('-', file);
27863 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27864 assemble_name (file, buf);
27865 putc ('\n', file);
27866 if (uses_toc == 2)
27867 switch_to_other_text_partition ();
27870 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27871 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27873 if (TARGET_CMODEL == CMODEL_LARGE
27874 && rs6000_global_entry_point_prologue_needed_p ())
27876 char buf[256];
27878 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27880 fprintf (file, "\t.quad .TOC.-");
27881 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27882 assemble_name (file, buf);
27883 putc ('\n', file);
27886 if (DEFAULT_ABI == ABI_AIX)
27888 const char *desc_name, *orig_name;
27890 orig_name = (*targetm.strip_name_encoding) (name);
27891 desc_name = orig_name;
27892 while (*desc_name == '.')
27893 desc_name++;
27895 if (TREE_PUBLIC (decl))
27896 fprintf (file, "\t.globl %s\n", desc_name);
27898 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27899 fprintf (file, "%s:\n", desc_name);
27900 fprintf (file, "\t.long %s\n", orig_name);
27901 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
27902 fputs ("\t.long 0\n", file);
27903 fprintf (file, "\t.previous\n");
27905 ASM_OUTPUT_LABEL (file, name);
27908 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
27909 static void
27910 rs6000_elf_file_end (void)
27912 #ifdef HAVE_AS_GNU_ATTRIBUTE
27913 /* ??? The value emitted depends on options active at file end.
27914 Assume anyone using #pragma or attributes that might change
27915 options knows what they are doing. */
27916 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
27917 && rs6000_passes_float)
27919 int fp;
27921 if (TARGET_HARD_FLOAT)
27922 fp = 1;
27923 else
27924 fp = 2;
27925 if (rs6000_passes_long_double)
27927 if (!TARGET_LONG_DOUBLE_128)
27928 fp |= 2 * 4;
27929 else if (TARGET_IEEEQUAD)
27930 fp |= 3 * 4;
27931 else
27932 fp |= 1 * 4;
27934 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
27936 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
27938 if (rs6000_passes_vector)
27939 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
27940 (TARGET_ALTIVEC_ABI ? 2 : 1));
27941 if (rs6000_returns_struct)
27942 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
27943 aix_struct_return ? 2 : 1);
27945 #endif
27946 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
27947 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
27948 file_end_indicate_exec_stack ();
27949 #endif
27951 if (flag_split_stack)
27952 file_end_indicate_split_stack ();
27954 if (cpu_builtin_p)
27956 /* We have expanded a CPU builtin, so we need to emit a reference to
27957 the special symbol that LIBC uses to declare that it supports
27958 putting AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB. */
27959 switch_to_section (data_section);
27960 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
27961 fprintf (asm_out_file, "\t%s %s\n",
27962 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
27965 #endif
27967 #if TARGET_XCOFF
27969 #ifndef HAVE_XCOFF_DWARF_EXTRAS
27970 #define HAVE_XCOFF_DWARF_EXTRAS 0
27971 #endif
27973 static enum unwind_info_type
27974 rs6000_xcoff_debug_unwind_info (void)
27976 return UI_NONE;
27979 static void
27980 rs6000_xcoff_asm_output_anchor (rtx symbol)
27982 char buffer[100];
27984 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
27985 SYMBOL_REF_BLOCK_OFFSET (symbol));
27986 fprintf (asm_out_file, "%s", SET_ASM_OP);
27987 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
27988 fprintf (asm_out_file, ",");
27989 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
27990 fprintf (asm_out_file, "\n");
27993 static void
27994 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
27996 fputs (GLOBAL_ASM_OP, stream);
27997 RS6000_OUTPUT_BASENAME (stream, name);
27998 putc ('\n', stream);
28001 /* A get_unnamed_decl callback, used for read-only sections. DIRECTIVE
28002 points to the section string variable. */
28004 static void
28005 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
28007 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
28008 *(const char *const *) directive,
28009 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28012 /* Likewise for read-write sections. */
28014 static void
28015 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
28017 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
28018 *(const char *const *) directive,
28019 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28022 static void
28023 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
28025 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
28026 *(const char *const *) directive,
28027 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28030 /* A get_unnamed_section callback, used for switching to toc_section. */
28032 static void
28033 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28035 if (TARGET_MINIMAL_TOC)
28037 /* toc_section is always selected at least once from
28038 rs6000_xcoff_file_start, so this is guaranteed to
28039 always be defined once and only once in each file. */
28040 if (!toc_initialized)
28042 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
28043 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
28044 toc_initialized = 1;
28046 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
28047 (TARGET_32BIT ? "" : ",3"));
28049 else
28050 fputs ("\t.toc\n", asm_out_file);
28053 /* Implement TARGET_ASM_INIT_SECTIONS. */
28055 static void
28056 rs6000_xcoff_asm_init_sections (void)
28058 read_only_data_section
28059 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
28060 &xcoff_read_only_section_name);
28062 private_data_section
28063 = get_unnamed_section (SECTION_WRITE,
28064 rs6000_xcoff_output_readwrite_section_asm_op,
28065 &xcoff_private_data_section_name);
28067 read_only_private_data_section
28068 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
28069 &xcoff_private_rodata_section_name);
28071 tls_data_section
28072 = get_unnamed_section (SECTION_TLS,
28073 rs6000_xcoff_output_tls_section_asm_op,
28074 &xcoff_tls_data_section_name);
28076 tls_private_data_section
28077 = get_unnamed_section (SECTION_TLS,
28078 rs6000_xcoff_output_tls_section_asm_op,
28079 &xcoff_private_data_section_name);
28081 toc_section
28082 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
28084 readonly_data_section = read_only_data_section;
28087 static int
28088 rs6000_xcoff_reloc_rw_mask (void)
28090 return 3;
28093 static void
28094 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
28095 tree decl ATTRIBUTE_UNUSED)
28097 int smclass;
28098 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
28100 if (flags & SECTION_EXCLUDE)
28101 smclass = 4;
28102 else if (flags & SECTION_DEBUG)
28104 fprintf (asm_out_file, "\t.dwsect %s\n", name);
28105 return;
28107 else if (flags & SECTION_CODE)
28108 smclass = 0;
28109 else if (flags & SECTION_TLS)
28110 smclass = 3;
28111 else if (flags & SECTION_WRITE)
28112 smclass = 2;
28113 else
28114 smclass = 1;
28116 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
28117 (flags & SECTION_CODE) ? "." : "",
28118 name, suffix[smclass], flags & SECTION_ENTSIZE);
28121 #define IN_NAMED_SECTION(DECL) \
28122 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
28123 && DECL_SECTION_NAME (DECL) != NULL)
28125 static section *
28126 rs6000_xcoff_select_section (tree decl, int reloc,
28127 unsigned HOST_WIDE_INT align)
28129 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
28130 a named section. */
28131 if (align > BIGGEST_ALIGNMENT)
28133 resolve_unique_section (decl, reloc, true);
28134 if (IN_NAMED_SECTION (decl))
28135 return get_named_section (decl, NULL, reloc);
28138 if (decl_readonly_section (decl, reloc))
28140 if (TREE_PUBLIC (decl))
28141 return read_only_data_section;
28142 else
28143 return read_only_private_data_section;
28145 else
28147 #if HAVE_AS_TLS
28148 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
28150 if (TREE_PUBLIC (decl))
28151 return tls_data_section;
28152 else if (bss_initializer_p (decl))
28154 /* Convert to COMMON to emit in BSS. */
28155 DECL_COMMON (decl) = 1;
28156 return tls_comm_section;
28158 else
28159 return tls_private_data_section;
28161 else
28162 #endif
28163 if (TREE_PUBLIC (decl))
28164 return data_section;
28165 else
28166 return private_data_section;
28170 static void
28171 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
28173 const char *name;
28175 /* Use select_section for private data and uninitialized data with
28176 alignment <= BIGGEST_ALIGNMENT. */
28177 if (!TREE_PUBLIC (decl)
28178 || DECL_COMMON (decl)
28179 || (DECL_INITIAL (decl) == NULL_TREE
28180 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
28181 || DECL_INITIAL (decl) == error_mark_node
28182 || (flag_zero_initialized_in_bss
28183 && initializer_zerop (DECL_INITIAL (decl))))
28184 return;
28186 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
28187 name = (*targetm.strip_name_encoding) (name);
28188 set_decl_section_name (decl, name);
28191 /* Select section for constant in constant pool.
28193 On RS/6000, all constants are in the private read-only data area.
28194 However, if this is being placed in the TOC it must be output as a
28195 toc entry. */
28197 static section *
28198 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
28199 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
28201 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28202 return toc_section;
28203 else
28204 return read_only_private_data_section;
28207 /* Remove any trailing [DS] or the like from the symbol name. */
28209 static const char *
28210 rs6000_xcoff_strip_name_encoding (const char *name)
28212 size_t len;
28213 if (*name == '*')
28214 name++;
28215 len = strlen (name);
28216 if (name[len - 1] == ']')
28217 return ggc_alloc_string (name, len - 4);
28218 else
28219 return name;
28222 /* Section attributes. AIX is always PIC. */
28224 static unsigned int
28225 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
28227 unsigned int align;
28228 unsigned int flags = default_section_type_flags (decl, name, reloc);
28230 /* Align to at least UNIT size. */
28231 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
28232 align = MIN_UNITS_PER_WORD;
28233 else
28234 /* Increase alignment of large objects if not already stricter. */
28235 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
28236 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
28237 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
28239 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
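/* The log2 alignment computed above travels in the SECTION_ENTSIZE
   bits of the flags and reappears as the numeric operand of .csect in
   rs6000_xcoff_asm_named_section; e.g. (illustrative numbers only) an
   8-byte-aligned object gives exact_log2 (8) == 3 and is emitted as
   ".csect foo[RW],3".  */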
28242 /* Output at beginning of assembler file.
28244 Initialize the section names for the RS/6000 at this point.
28246 Specify filename, including full path, to assembler.
28248 We want to go into the TOC section so at least one .toc will be emitted.
28249 Also, in order to output proper .bs/.es pairs, we need at least one static
28250 [RW] section emitted.
28252 Finally, declare mcount when profiling to make the assembler happy. */
28254 static void
28255 rs6000_xcoff_file_start (void)
28257 rs6000_gen_section_name (&xcoff_bss_section_name,
28258 main_input_filename, ".bss_");
28259 rs6000_gen_section_name (&xcoff_private_data_section_name,
28260 main_input_filename, ".rw_");
28261 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
28262 main_input_filename, ".rop_");
28263 rs6000_gen_section_name (&xcoff_read_only_section_name,
28264 main_input_filename, ".ro_");
28265 rs6000_gen_section_name (&xcoff_tls_data_section_name,
28266 main_input_filename, ".tls_");
28267 rs6000_gen_section_name (&xcoff_tbss_section_name,
28268 main_input_filename, ".tbss_[UL]");
28270 fputs ("\t.file\t", asm_out_file);
28271 output_quoted_string (asm_out_file, main_input_filename);
28272 fputc ('\n', asm_out_file);
28273 if (write_symbols != NO_DEBUG)
28274 switch_to_section (private_data_section);
28275 switch_to_section (toc_section);
28276 switch_to_section (text_section);
28277 if (profile_flag)
28278 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
28279 rs6000_file_start ();
28282 /* Output at end of assembler file.
28283 On the RS/6000, referencing data should automatically pull in text. */
28285 static void
28286 rs6000_xcoff_file_end (void)
28288 switch_to_section (text_section);
28289 fputs ("_section_.text:\n", asm_out_file);
28290 switch_to_section (data_section);
28291 fputs (TARGET_32BIT
28292 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
28293 asm_out_file);
28296 struct declare_alias_data
28298 FILE *file;
28299 bool function_descriptor;
28302 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
28304 static bool
28305 rs6000_declare_alias (struct symtab_node *n, void *d)
28307 struct declare_alias_data *data = (struct declare_alias_data *)d;
28308 /* Main symbol is output specially, because varasm machinery does part of
28309 the job for us - we do not need to declare .globl/lglobs and such. */
28310 if (!n->alias || n->weakref)
28311 return false;
28313 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
28314 return false;
28316 /* Prevent assemble_alias from trying to use .set pseudo operation
28317 that does not behave as expected by the middle-end. */
28318 TREE_ASM_WRITTEN (n->decl) = true;
28320 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
28321 char *buffer = (char *) alloca (strlen (name) + 2);
28322 char *p;
28323 int dollar_inside = 0;
28325 strcpy (buffer, name);
28326 p = strchr (buffer, '$');
28327 while (p) {
28328 *p = '_';
28329 dollar_inside++;
28330 p = strchr (p + 1, '$');
28332 if (TREE_PUBLIC (n->decl))
28334 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
28336 if (dollar_inside) {
28337 if (data->function_descriptor)
28338 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
28339 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
28341 if (data->function_descriptor)
28343 fputs ("\t.globl .", data->file);
28344 RS6000_OUTPUT_BASENAME (data->file, buffer);
28345 putc ('\n', data->file);
28347 fputs ("\t.globl ", data->file);
28348 RS6000_OUTPUT_BASENAME (data->file, buffer);
28349 putc ('\n', data->file);
28351 #ifdef ASM_WEAKEN_DECL
28352 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
28353 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
28354 #endif
28356 else
28358 if (dollar_inside)
28360 if (data->function_descriptor)
28361 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
28362 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
28364 if (data->function_descriptor)
28366 fputs ("\t.lglobl .", data->file);
28367 RS6000_OUTPUT_BASENAME (data->file, buffer);
28368 putc ('\n', data->file);
28370 fputs ("\t.lglobl ", data->file);
28371 RS6000_OUTPUT_BASENAME (data->file, buffer);
28372 putc ('\n', data->file);
28374 if (data->function_descriptor)
28375 fputs (".", data->file);
28376 RS6000_OUTPUT_BASENAME (data->file, buffer);
28377 fputs (":\n", data->file);
28378 return false;
28382 #ifdef HAVE_GAS_HIDDEN
28383 /* Helper function to calculate visibility of a DECL
28384 and return the value as a const string. */
28386 static const char *
28387 rs6000_xcoff_visibility (tree decl)
28389 static const char * const visibility_types[] = {
28390 "", ",protected", ",hidden", ",internal"
28393 enum symbol_visibility vis = DECL_VISIBILITY (decl);
28394 return visibility_types[vis];
28396 #endif
28399 /* This macro produces the initial definition of a function name.
28400 On the RS/6000, we need to place an extra '.' in the function name and
28401 output the function descriptor.
28402 Dollar signs are converted to underscores.
28404 The csect for the function will have already been created when
28405 text_section was selected. We do have to go back to that csect, however.
28407 The third and fourth parameters to the .function pseudo-op (16 and 044)
28408 are placeholders which no longer have any use.
28410 Because AIX assembler's .set command has unexpected semantics, we output
28411 all aliases as alternative labels in front of the definition. */
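/* As an illustration (assuming a public 32-bit function "foo" with no
   aliases and no '$' in its name), the code below emits approximately:

	.globl .foo
	.csect foo[DS]
   foo:
	.long .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   i.e. the descriptor "foo" in the data section pointing at the code
   entry point ".foo".  */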
28413 void
28414 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
28416 char *buffer = (char *) alloca (strlen (name) + 1);
28417 char *p;
28418 int dollar_inside = 0;
28419 struct declare_alias_data data = {file, false};
28421 strcpy (buffer, name);
28422 p = strchr (buffer, '$');
28423 while (p) {
28424 *p = '_';
28425 dollar_inside++;
28426 p = strchr (p + 1, '$');
28428 if (TREE_PUBLIC (decl))
28430 if (!RS6000_WEAK || !DECL_WEAK (decl))
28432 if (dollar_inside) {
28433 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
28434 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
28436 fputs ("\t.globl .", file);
28437 RS6000_OUTPUT_BASENAME (file, buffer);
28438 #ifdef HAVE_GAS_HIDDEN
28439 fputs (rs6000_xcoff_visibility (decl), file);
28440 #endif
28441 putc ('\n', file);
28444 else
28446 if (dollar_inside) {
28447 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
28448 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
28450 fputs ("\t.lglobl .", file);
28451 RS6000_OUTPUT_BASENAME (file, buffer);
28452 putc ('\n', file);
28454 fputs ("\t.csect ", file);
28455 RS6000_OUTPUT_BASENAME (file, buffer);
28456 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
28457 RS6000_OUTPUT_BASENAME (file, buffer);
28458 fputs (":\n", file);
28459 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28460 &data, true);
28461 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
28462 RS6000_OUTPUT_BASENAME (file, buffer);
28463 fputs (", TOC[tc0], 0\n", file);
28464 in_section = NULL;
28465 switch_to_section (function_section (decl));
28466 putc ('.', file);
28467 RS6000_OUTPUT_BASENAME (file, buffer);
28468 fputs (":\n", file);
28469 data.function_descriptor = true;
28470 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28471 &data, true);
28472 if (!DECL_IGNORED_P (decl))
28474 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28475 xcoffout_declare_function (file, decl, buffer);
28476 else if (write_symbols == DWARF2_DEBUG)
28478 name = (*targetm.strip_name_encoding) (name);
28479 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
28482 return;
28486 /* Output assembly language to globalize a symbol from a DECL,
28487 possibly with visibility. */
28489 void
28490 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
28492 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
28493 fputs (GLOBAL_ASM_OP, stream);
28494 RS6000_OUTPUT_BASENAME (stream, name);
28495 #ifdef HAVE_GAS_HIDDEN
28496 fputs (rs6000_xcoff_visibility (decl), stream);
28497 #endif
28498 putc ('\n', stream);
28501 /* Output assembly language to define a symbol as COMMON from a DECL,
28502 possibly with visibility. */
28504 void
28505 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
28506 tree decl ATTRIBUTE_UNUSED,
28507 const char *name,
28508 unsigned HOST_WIDE_INT size,
28509 unsigned HOST_WIDE_INT align)
28511 unsigned HOST_WIDE_INT align2 = 2;
28513 if (align > 32)
28514 align2 = floor_log2 (align / BITS_PER_UNIT);
28515 else if (size > 4)
28516 align2 = 3;
28518 fputs (COMMON_ASM_OP, stream);
28519 RS6000_OUTPUT_BASENAME (stream, name);
28521 fprintf (stream,
28522 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
28523 size, align2);
28525 #ifdef HAVE_GAS_HIDDEN
28526 if (decl != NULL)
28527 fputs (rs6000_xcoff_visibility (decl), stream);
28528 #endif
28529 putc ('\n', stream);
28532 /* This macro produces the initial definition of an object (variable) name.
28533 Because AIX assembler's .set command has unexpected semantics, we output
28534 all aliases as alternative labels in front of the definition. */
28536 void
28537 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
28539 struct declare_alias_data data = {file, false};
28540 RS6000_OUTPUT_BASENAME (file, name);
28541 fputs (":\n", file);
28542 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28543 &data, true);
28546 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
28548 void
28549 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
28551 fputs (integer_asm_op (size, FALSE), file);
28552 assemble_name (file, label);
28553 fputs ("-$", file);
28556 /* Output a symbol offset relative to the dbase for the current object.
28557 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
28558 signed offsets.
28560 __gcc_unwind_dbase is embedded in all executables/libraries through
28561 libgcc/config/rs6000/crtdbase.S. */
28563 void
28564 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
28566 fputs (integer_asm_op (size, FALSE), file);
28567 assemble_name (file, label);
28568 fputs("-__gcc_unwind_dbase", file);
28571 #ifdef HAVE_AS_TLS
28572 static void
28573 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
28575 rtx symbol;
28576 int flags;
28577 const char *symname;
28579 default_encode_section_info (decl, rtl, first);
28581 /* Careful not to prod global register variables. */
28582 if (!MEM_P (rtl))
28583 return;
28584 symbol = XEXP (rtl, 0);
28585 if (!SYMBOL_REF_P (symbol))
28586 return;
28588 flags = SYMBOL_REF_FLAGS (symbol);
28590 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
28591 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
28593 SYMBOL_REF_FLAGS (symbol) = flags;
28595 /* Append mapping class to extern decls. */
28596 symname = XSTR (symbol, 0);
28597 if (decl /* sync condition with assemble_external () */
28598 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
28599 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
28600 || TREE_CODE (decl) == FUNCTION_DECL)
28601 && symname[strlen (symname) - 1] != ']')
28603 char *newname = (char *) alloca (strlen (symname) + 5);
28604 strcpy (newname, symname);
28605 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
28606 ? "[DS]" : "[UA]"));
28607 XSTR (symbol, 0) = ggc_strdup (newname);
28610 #endif /* HAVE_AS_TLS */
28611 #endif /* TARGET_XCOFF */
28613 void
28614 rs6000_asm_weaken_decl (FILE *stream, tree decl,
28615 const char *name, const char *val)
28617 fputs ("\t.weak\t", stream);
28618 RS6000_OUTPUT_BASENAME (stream, name);
28619 if (decl && TREE_CODE (decl) == FUNCTION_DECL
28620 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
28622 if (TARGET_XCOFF)
28623 fputs ("[DS]", stream);
28624 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
28625 if (TARGET_XCOFF)
28626 fputs (rs6000_xcoff_visibility (decl), stream);
28627 #endif
28628 fputs ("\n\t.weak\t.", stream);
28629 RS6000_OUTPUT_BASENAME (stream, name);
28631 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
28632 if (TARGET_XCOFF)
28633 fputs (rs6000_xcoff_visibility (decl), stream);
28634 #endif
28635 fputc ('\n', stream);
28636 if (val)
28638 #ifdef ASM_OUTPUT_DEF
28639 ASM_OUTPUT_DEF (stream, name, val);
28640 #endif
28641 if (decl && TREE_CODE (decl) == FUNCTION_DECL
28642 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
28644 fputs ("\t.set\t.", stream);
28645 RS6000_OUTPUT_BASENAME (stream, name);
28646 fputs (",.", stream);
28647 RS6000_OUTPUT_BASENAME (stream, val);
28648 fputc ('\n', stream);
28654 /* Return true if INSN should not be copied. */
28656 static bool
28657 rs6000_cannot_copy_insn_p (rtx_insn *insn)
28659 return recog_memoized (insn) >= 0
28660 && get_attr_cannot_copy (insn);
28663 /* Compute a (partial) cost for rtx X. Return true if the complete
28664 cost has been computed, and false if subexpressions should be
28665 scanned. In either case, *TOTAL contains the cost result. */
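/* For example (see the cases below), a CONST_INT that satisfies the
   "I" constraint inside a PLUS is costed at 0 and the hook returns
   true, since the constant folds into the addi; a non-float PLUS
   itself costs one insn and returns false so that its operands are
   costed recursively.  */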
28667 static bool
28668 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
28669 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
28671 int code = GET_CODE (x);
28673 switch (code)
28675 /* On the RS/6000, if it is valid in the insn, it is free. */
28676 case CONST_INT:
28677 if (((outer_code == SET
28678 || outer_code == PLUS
28679 || outer_code == MINUS)
28680 && (satisfies_constraint_I (x)
28681 || satisfies_constraint_L (x)))
28682 || (outer_code == AND
28683 && (satisfies_constraint_K (x)
28684 || (mode == SImode
28685 ? satisfies_constraint_L (x)
28686 : satisfies_constraint_J (x))))
28687 || ((outer_code == IOR || outer_code == XOR)
28688 && (satisfies_constraint_K (x)
28689 || (mode == SImode
28690 ? satisfies_constraint_L (x)
28691 : satisfies_constraint_J (x))))
28692 || outer_code == ASHIFT
28693 || outer_code == ASHIFTRT
28694 || outer_code == LSHIFTRT
28695 || outer_code == ROTATE
28696 || outer_code == ROTATERT
28697 || outer_code == ZERO_EXTRACT
28698 || (outer_code == MULT
28699 && satisfies_constraint_I (x))
28700 || ((outer_code == DIV || outer_code == UDIV
28701 || outer_code == MOD || outer_code == UMOD)
28702 && exact_log2 (INTVAL (x)) >= 0)
28703 || (outer_code == COMPARE
28704 && (satisfies_constraint_I (x)
28705 || satisfies_constraint_K (x)))
28706 || ((outer_code == EQ || outer_code == NE)
28707 && (satisfies_constraint_I (x)
28708 || satisfies_constraint_K (x)
28709 || (mode == SImode
28710 ? satisfies_constraint_L (x)
28711 : satisfies_constraint_J (x))))
28712 || (outer_code == GTU
28713 && satisfies_constraint_I (x))
28714 || (outer_code == LTU
28715 && satisfies_constraint_P (x)))
28717 *total = 0;
28718 return true;
28720 else if ((outer_code == PLUS
28721 && reg_or_add_cint_operand (x, VOIDmode))
28722 || (outer_code == MINUS
28723 && reg_or_sub_cint_operand (x, VOIDmode))
28724 || ((outer_code == SET
28725 || outer_code == IOR
28726 || outer_code == XOR)
28727 && (INTVAL (x)
28728 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
28730 *total = COSTS_N_INSNS (1);
28731 return true;
28733 /* FALLTHRU */
28735 case CONST_DOUBLE:
28736 case CONST_WIDE_INT:
28737 case CONST:
28738 case HIGH:
28739 case SYMBOL_REF:
28740 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
28741 return true;
28743 case MEM:
28744 /* When optimizing for size, MEM should be slightly more expensive
28745 than generating the address, e.g., (plus (reg) (const)).
28746 L1 cache latency is about two instructions. */
28747 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
28748 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
28749 *total += COSTS_N_INSNS (100);
28750 return true;
28752 case LABEL_REF:
28753 *total = 0;
28754 return true;
28756 case PLUS:
28757 case MINUS:
28758 if (FLOAT_MODE_P (mode))
28759 *total = rs6000_cost->fp;
28760 else
28761 *total = COSTS_N_INSNS (1);
28762 return false;
28764 case MULT:
28765 if (CONST_INT_P (XEXP (x, 1))
28766 && satisfies_constraint_I (XEXP (x, 1)))
28768 if (INTVAL (XEXP (x, 1)) >= -256
28769 && INTVAL (XEXP (x, 1)) <= 255)
28770 *total = rs6000_cost->mulsi_const9;
28771 else
28772 *total = rs6000_cost->mulsi_const;
28774 else if (mode == SFmode)
28775 *total = rs6000_cost->fp;
28776 else if (FLOAT_MODE_P (mode))
28777 *total = rs6000_cost->dmul;
28778 else if (mode == DImode)
28779 *total = rs6000_cost->muldi;
28780 else
28781 *total = rs6000_cost->mulsi;
28782 return false;
28784 case FMA:
28785 if (mode == SFmode)
28786 *total = rs6000_cost->fp;
28787 else
28788 *total = rs6000_cost->dmul;
28789 break;
28791 case DIV:
28792 case MOD:
28793 if (FLOAT_MODE_P (mode))
28795 *total = mode == DFmode ? rs6000_cost->ddiv
28796 : rs6000_cost->sdiv;
28797 return false;
28799 /* FALLTHRU */
28801 case UDIV:
28802 case UMOD:
28803 if (CONST_INT_P (XEXP (x, 1))
28804 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
28806 if (code == DIV || code == MOD)
28807 /* Shift, addze */
28808 *total = COSTS_N_INSNS (2);
28809 else
28810 /* Shift */
28811 *total = COSTS_N_INSNS (1);
28813 else
28815 if (GET_MODE (XEXP (x, 1)) == DImode)
28816 *total = rs6000_cost->divdi;
28817 else
28818 *total = rs6000_cost->divsi;
28820 /* Add in shift and subtract for MOD unless we have a mod instruction. */
28821 if (!TARGET_MODULO && (code == MOD || code == UMOD))
28822 *total += COSTS_N_INSNS (2);
28823 return false;
28825 case CTZ:
28826 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
28827 return false;
28829 case FFS:
28830 *total = COSTS_N_INSNS (4);
28831 return false;
28833 case POPCOUNT:
28834 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
28835 return false;
28837 case PARITY:
28838 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
28839 return false;
28841 case NOT:
28842 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
28843 *total = 0;
28844 else
28845 *total = COSTS_N_INSNS (1);
28846 return false;
28848 case AND:
28849 if (CONST_INT_P (XEXP (x, 1)))
28851 rtx left = XEXP (x, 0);
28852 rtx_code left_code = GET_CODE (left);
28854 /* rotate-and-mask: 1 insn. */
28855 if ((left_code == ROTATE
28856 || left_code == ASHIFT
28857 || left_code == LSHIFTRT)
28858 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
28860 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
28861 if (!CONST_INT_P (XEXP (left, 1)))
28862 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
28863 *total += COSTS_N_INSNS (1);
28864 return true;
28867 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
28868 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
28869 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
28870 || (val & 0xffff) == val
28871 || (val & 0xffff0000) == val
28872 || ((val & 0xffff) == 0 && mode == SImode))
28874 *total = rtx_cost (left, mode, AND, 0, speed);
28875 *total += COSTS_N_INSNS (1);
28876 return true;
28879 /* 2 insns. */
28880 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
28882 *total = rtx_cost (left, mode, AND, 0, speed);
28883 *total += COSTS_N_INSNS (2);
28884 return true;
28888 *total = COSTS_N_INSNS (1);
28889 return false;
28891 case IOR:
28892 /* FIXME */
28893 *total = COSTS_N_INSNS (1);
28894 return true;
28896 case CLZ:
28897 case XOR:
28898 case ZERO_EXTRACT:
28899 *total = COSTS_N_INSNS (1);
28900 return false;
28902 case ASHIFT:
28903 /* EXTSWSLI combines a sign extend and a shift in one instruction.
28904 Don't count the two operations separately within the insn. */
28905 if (TARGET_EXTSWSLI && mode == DImode
28906 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
28907 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
28909 *total = 0;
28910 return false;
28912 /* fall through */
28914 case ASHIFTRT:
28915 case LSHIFTRT:
28916 case ROTATE:
28917 case ROTATERT:
28918 /* Handle mul_highpart. */
28919 if (outer_code == TRUNCATE
28920 && GET_CODE (XEXP (x, 0)) == MULT)
28922 if (mode == DImode)
28923 *total = rs6000_cost->muldi;
28924 else
28925 *total = rs6000_cost->mulsi;
28926 return true;
28928 else if (outer_code == AND)
28929 *total = 0;
28930 else
28931 *total = COSTS_N_INSNS (1);
28932 return false;
28934 case SIGN_EXTEND:
28935 case ZERO_EXTEND:
28936 if (MEM_P (XEXP (x, 0)))
28937 *total = 0;
28938 else
28939 *total = COSTS_N_INSNS (1);
28940 return false;
28942 case COMPARE:
28943 case NEG:
28944 case ABS:
28945 if (!FLOAT_MODE_P (mode))
28947 *total = COSTS_N_INSNS (1);
28948 return false;
28950 /* FALLTHRU */
28952 case FLOAT:
28953 case UNSIGNED_FLOAT:
28954 case FIX:
28955 case UNSIGNED_FIX:
28956 case FLOAT_TRUNCATE:
28957 *total = rs6000_cost->fp;
28958 return false;
28960 case FLOAT_EXTEND:
28961 if (mode == DFmode)
28962 *total = rs6000_cost->sfdf_convert;
28963 else
28964 *total = rs6000_cost->fp;
28965 return false;
28967 case UNSPEC:
28968 switch (XINT (x, 1))
28970 case UNSPEC_FRSP:
28971 *total = rs6000_cost->fp;
28972 return true;
28974 default:
28975 break;
28977 break;
28979 case CALL:
28980 case IF_THEN_ELSE:
28981 if (!speed)
28983 *total = COSTS_N_INSNS (1);
28984 return true;
28986 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
28988 *total = rs6000_cost->fp;
28989 return false;
28991 break;
28993 case NE:
28994 case EQ:
28995 case GTU:
28996 case LTU:
28997 /* Carry bit requires mode == Pmode.
28998 NEG or PLUS already counted so only add one. */
28999 if (mode == Pmode
29000 && (outer_code == NEG || outer_code == PLUS))
29002 *total = COSTS_N_INSNS (1);
29003 return true;
29005 /* FALLTHRU */
29007 case GT:
29008 case LT:
29009 case UNORDERED:
29010 if (outer_code == SET)
29012 if (XEXP (x, 1) == const0_rtx)
29014 *total = COSTS_N_INSNS (2);
29015 return true;
29017 else
29019 *total = COSTS_N_INSNS (3);
29020 return false;
29023 /* CC COMPARE. */
29024 if (outer_code == COMPARE)
29026 *total = 0;
29027 return true;
29029 break;
29031 default:
29032 break;
29035 return false;
29038 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
29040 static bool
29041 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
29042 int opno, int *total, bool speed)
29044 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
29046 fprintf (stderr,
29047 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
29048 "opno = %d, total = %d, speed = %s, x:\n",
29049 ret ? "complete" : "scan inner",
29050 GET_MODE_NAME (mode),
29051 GET_RTX_NAME (outer_code),
29052 opno,
29053 *total,
29054 speed ? "true" : "false");
29056 debug_rtx (x);
29058 return ret;
29061 static int
29062 rs6000_insn_cost (rtx_insn *insn, bool speed)
29064 if (recog_memoized (insn) < 0)
29065 return 0;
29067 if (!speed)
29068 return get_attr_length (insn);
29070 int cost = get_attr_cost (insn);
29071 if (cost > 0)
29072 return cost;
29074 int n = get_attr_length (insn) / 4;
29075 enum attr_type type = get_attr_type (insn);
29077 switch (type)
29079 case TYPE_LOAD:
29080 case TYPE_FPLOAD:
29081 case TYPE_VECLOAD:
29082 cost = COSTS_N_INSNS (n + 1);
29083 break;
29085 case TYPE_MUL:
29086 switch (get_attr_size (insn))
29088 case SIZE_8:
29089 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
29090 break;
29091 case SIZE_16:
29092 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
29093 break;
29094 case SIZE_32:
29095 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
29096 break;
29097 case SIZE_64:
29098 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
29099 break;
29100 default:
29101 gcc_unreachable ();
29103 break;
29104 case TYPE_DIV:
29105 switch (get_attr_size (insn))
29107 case SIZE_32:
29108 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
29109 break;
29110 case SIZE_64:
29111 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
29112 break;
29113 default:
29114 gcc_unreachable ();
29116 break;
29118 case TYPE_FP:
29119 cost = n * rs6000_cost->fp;
29120 break;
29121 case TYPE_DMUL:
29122 cost = n * rs6000_cost->dmul;
29123 break;
29124 case TYPE_SDIV:
29125 cost = n * rs6000_cost->sdiv;
29126 break;
29127 case TYPE_DDIV:
29128 cost = n * rs6000_cost->ddiv;
29129 break;
29131 case TYPE_SYNC:
29132 case TYPE_LOAD_L:
29133 case TYPE_MFCR:
29134 case TYPE_MFCRF:
29135 cost = COSTS_N_INSNS (n + 2);
29136 break;
29138 default:
29139 cost = COSTS_N_INSNS (n);
29142 return cost;
29145 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
29147 static int
29148 rs6000_debug_address_cost (rtx x, machine_mode mode,
29149 addr_space_t as, bool speed)
29151 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
29153 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
29154 ret, speed ? "true" : "false");
29155 debug_rtx (x);
29157 return ret;
29161 /* A C expression returning the cost of moving data from a register of class
29162 CLASS1 to one of CLASS2. */
29164 static int
29165 rs6000_register_move_cost (machine_mode mode,
29166 reg_class_t from, reg_class_t to)
29168 int ret;
29169 reg_class_t rclass;
29171 if (TARGET_DEBUG_COST)
29172 dbg_cost_ctrl++;
29174 /* If we have VSX, we can easily move between FPR or Altivec registers,
29175 otherwise we can only easily move within classes.
29176 Do this first so we give best-case answers for union classes
29177 containing both gprs and vsx regs. */
29178 HARD_REG_SET to_vsx, from_vsx;
29179 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
29180 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
29181 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
29182 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
29183 if (!hard_reg_set_empty_p (to_vsx)
29184 && !hard_reg_set_empty_p (from_vsx)
29185 && (TARGET_VSX
29186 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
29188 int reg = FIRST_FPR_REGNO;
29189 if (TARGET_VSX
29190 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
29191 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
29192 reg = FIRST_ALTIVEC_REGNO;
29193 ret = 2 * hard_regno_nregs (reg, mode);
29196 /* Moves from/to GENERAL_REGS. */
29197 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
29198 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
29200 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
29202 if (TARGET_DIRECT_MOVE)
29204 /* Keep the cost for direct moves above that for within
29205 a register class even if the actual processor cost is
29206 comparable. We do this because a direct move insn
29207 can't be a nop, whereas with ideal register
29208 allocation a move within the same class might turn
29209 out to be a nop. */
29210 if (rs6000_tune == PROCESSOR_POWER9
29211 || rs6000_tune == PROCESSOR_FUTURE)
29212 ret = 3 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29213 else
29214 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29215 /* SFmode requires a conversion when moving between gprs
29216 and vsx. */
29217 if (mode == SFmode)
29218 ret += 2;
29220 else
29221 ret = (rs6000_memory_move_cost (mode, rclass, false)
29222 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
29225 /* It's more expensive to move CR_REGS than CR0_REGS because of the
29226 shift. */
29227 else if (rclass == CR_REGS)
29228 ret = 4;
29230 /* For those processors that have slow LR/CTR moves, make them more
29231 expensive than memory in order to bias spills to memory. */
29232 else if ((rs6000_tune == PROCESSOR_POWER6
29233 || rs6000_tune == PROCESSOR_POWER7
29234 || rs6000_tune == PROCESSOR_POWER8
29235 || rs6000_tune == PROCESSOR_POWER9)
29236 && reg_class_subset_p (rclass, SPECIAL_REGS))
29237 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29239 else
29240 /* A move will cost one instruction per GPR moved. */
29241 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29244 /* Everything else has to go through GENERAL_REGS. */
29245 else
29246 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
29247 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
29249 if (TARGET_DEBUG_COST)
29251 if (dbg_cost_ctrl == 1)
29252 fprintf (stderr,
29253 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
29254 ret, GET_MODE_NAME (mode), reg_class_names[from],
29255 reg_class_names[to]);
29256 dbg_cost_ctrl--;
29259 return ret;
29262 /* A C expression returning the cost of moving data of MODE from a register to
29263 or from memory. */
29265 static int
29266 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
29267 bool in ATTRIBUTE_UNUSED)
29269 int ret;
29271 if (TARGET_DEBUG_COST)
29272 dbg_cost_ctrl++;
29274 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
29275 ret = 4 * hard_regno_nregs (0, mode);
29276 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
29277 || reg_classes_intersect_p (rclass, VSX_REGS)))
29278 ret = 4 * hard_regno_nregs (32, mode);
29279 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
29280 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
29281 else
29282 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
29284 if (TARGET_DEBUG_COST)
29286 if (dbg_cost_ctrl == 1)
29287 fprintf (stderr,
29288 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
29289 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
29290 dbg_cost_ctrl--;
29293 return ret;
29296 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
29298 The register allocator chooses GEN_OR_VSX_REGS for the allocno
29299 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
29300 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
29301 move cost between GENERAL_REGS and VSX_REGS low.
29303 It might seem reasonable to use a union class. After all, if usage
29304 of vsr is low and gpr high, it might make sense to spill gpr to vsr
29305 rather than memory. However, in cases where register pressure of
29306 both is high, like the cactus_adm spec test, allowing
29307 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
29308 the first scheduling pass. This is partly due to an allocno of
29309 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
29310 class, which gives too high a pressure for GENERAL_REGS and too low
29311 for VSX_REGS. So, force a choice of the subclass here.
29313 The best class is also the union if GENERAL_REGS and VSX_REGS have
29314 the same cost. In that case we do use GEN_OR_VSX_REGS as the
29315 allocno class, since trying to narrow down the class by regno mode
29316 is prone to error. For example, SImode is allowed in VSX regs and
29317 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
29318 it would be wrong to choose an allocno of GENERAL_REGS based on
29319 SImode. */
29321 static reg_class_t
29322 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
29323 reg_class_t allocno_class,
29324 reg_class_t best_class)
29326 switch (allocno_class)
29328 case GEN_OR_VSX_REGS:
29329 /* best_class must be a subset of allocno_class. */
29330 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
29331 || best_class == GEN_OR_FLOAT_REGS
29332 || best_class == VSX_REGS
29333 || best_class == ALTIVEC_REGS
29334 || best_class == FLOAT_REGS
29335 || best_class == GENERAL_REGS
29336 || best_class == BASE_REGS);
29337 /* Use best_class, but choose a wider class when copying from the
29338 wider class to best_class is cheap. This mimics IRA's choice
29339 of allocno class. */
29340 if (best_class == BASE_REGS)
29341 return GENERAL_REGS;
29342 if (TARGET_VSX
29343 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
29344 return VSX_REGS;
29345 return best_class;
29347 default:
29348 break;
29351 return allocno_class;
29354 /* Return the decl of a target-specific builtin that implements the
29355 reciprocal of FNDECL, or NULL_TREE if none is available. */
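/* For instance, when the relevant -mrecip option enables automatic
   rsqrt generation, the V2DF square-root builtin maps to the
   reciprocal-square-root builtin below, letting x / sqrt (y) be
   rewritten as x * rsqrt (y).  (Illustrative use; the precise
   conditions live in RS6000_RECIP_AUTO_RSQRTE_P.)  */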
29357 static tree
29358 rs6000_builtin_reciprocal (tree fndecl)
29360 switch (DECL_FUNCTION_CODE (fndecl))
29362 case VSX_BUILTIN_XVSQRTDP:
29363 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
29364 return NULL_TREE;
29366 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
29368 case VSX_BUILTIN_XVSQRTSP:
29369 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
29370 return NULL_TREE;
29372 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
29374 default:
29375 return NULL_TREE;
29379 /* Load up a constant. If the mode is a vector mode, splat the value across
29380 all of the vector elements. */
29382 static rtx
29383 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
29385 rtx reg;
29387 if (mode == SFmode || mode == DFmode)
29389 rtx d = const_double_from_real_value (dconst, mode);
29390 reg = force_reg (mode, d);
29392 else if (mode == V4SFmode)
29394 rtx d = const_double_from_real_value (dconst, SFmode);
29395 rtvec v = gen_rtvec (4, d, d, d, d);
29396 reg = gen_reg_rtx (mode);
29397 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29399 else if (mode == V2DFmode)
29401 rtx d = const_double_from_real_value (dconst, DFmode);
29402 rtvec v = gen_rtvec (2, d, d);
29403 reg = gen_reg_rtx (mode);
29404 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29406 else
29407 gcc_unreachable ();
29409 return reg;
29412 /* Generate an FMA instruction. */
29414 static void
29415 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
29417 machine_mode mode = GET_MODE (target);
29418 rtx dst;
29420 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
29421 gcc_assert (dst != NULL);
29423 if (dst != target)
29424 emit_move_insn (target, dst);
29427 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
29429 static void
29430 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
29432 machine_mode mode = GET_MODE (dst);
29433 rtx r;
29435 /* This is a tad more complicated, since the fnma_optab is for
29436 a different expression: fma(-m1, m2, a), which is the same
29437 thing except in the case of signed zeros.
29439 Fortunately we know that if FMA is supported, FNMSUB is also
29440 supported in the ISA. Just expand it directly. */
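/* Concretely, with m1 = m2 = a = 1.0: fma (-m1, m2, a) evaluates to
   +0.0, while -fma (m1, m2, -a) evaluates to -0.0, so the two forms
   differ exactly when the exact result is zero.  */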
29442 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
29444 r = gen_rtx_NEG (mode, a);
29445 r = gen_rtx_FMA (mode, m1, m2, r);
29446 r = gen_rtx_NEG (mode, r);
29447 emit_insn (gen_rtx_SET (dst, r));
29450 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
29451 add a reg_note saying that this was a division. Support both scalar and
29452 vector divide. Assumes no trapping math and finite arguments. */
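/* Expository summary of the sequence below: with e_i = 1 - d * x_i,
   the update x_(i+1) = x_i + e_i * x_i squares the error term
   (e_(i+1) = e_i * e_i), roughly doubling the number of accurate bits
   per pass.  The final pass folds in the numerator:
   u = n * x; v = n - d * u; dst = u + v * x.  */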
29454 void
29455 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
29457 machine_mode mode = GET_MODE (dst);
29458 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
29459 int i;
29461 /* Low precision estimates guarantee 5 bits of accuracy. High
29462 precision estimates guarantee 14 bits of accuracy. SFmode
29463 requires 23 bits of accuracy. DFmode requires 52 bits of
29464 accuracy. Each pass at least doubles the accuracy, leading
29465 to the following. */
29466 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29467 if (mode == DFmode || mode == V2DFmode)
29468 passes++;
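/* Worked out: from a 5-bit estimate, k passes give about 5*2^k good
   bits, so SFmode (23 bits) needs 3 passes and DFmode (52 bits) needs
   4.  From a 14-bit TARGET_RECIP_PRECISION estimate, 1 pass gives 28
   bits (enough for SFmode) and 2 passes give 56 (enough for DFmode).  */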
29470 enum insn_code code = optab_handler (smul_optab, mode);
29471 insn_gen_fn gen_mul = GEN_FCN (code);
29473 gcc_assert (code != CODE_FOR_nothing);
29475 one = rs6000_load_constant_and_splat (mode, dconst1);
29477 /* x0 = 1./d estimate */
29478 x0 = gen_reg_rtx (mode);
29479 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
29480 UNSPEC_FRES)));
29482 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
29483 if (passes > 1) {
29485 /* e0 = 1. - d * x0 */
29486 e0 = gen_reg_rtx (mode);
29487 rs6000_emit_nmsub (e0, d, x0, one);
29489 /* x1 = x0 + e0 * x0 */
29490 x1 = gen_reg_rtx (mode);
29491 rs6000_emit_madd (x1, e0, x0, x0);
29493 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
29494 ++i, xprev = xnext, eprev = enext) {
29496 /* enext = eprev * eprev */
29497 enext = gen_reg_rtx (mode);
29498 emit_insn (gen_mul (enext, eprev, eprev));
29500 /* xnext = xprev + enext * xprev */
29501 xnext = gen_reg_rtx (mode);
29502 rs6000_emit_madd (xnext, enext, xprev, xprev);
29505 } else
29506 xprev = x0;
29508 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
29510 /* u = n * xprev */
29511 u = gen_reg_rtx (mode);
29512 emit_insn (gen_mul (u, n, xprev));
29514 /* v = n - (d * u) */
29515 v = gen_reg_rtx (mode);
29516 rs6000_emit_nmsub (v, d, u, n);
29518 /* dst = (v * xprev) + u */
29519 rs6000_emit_madd (dst, v, xprev, u);
29521 if (note_p)
29522 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
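/* A standalone C sketch of the sequence emitted above (illustrative
   only, not compiler code; the float divide stands in for the
   low-precision fre/fres hardware estimate, and PASSES follows the
   computation above: 3, or 4 for DFmode, without -mrecip-precision).  */
#if 0
static double
swdiv_model (double n, double d, int passes)
{
  double x = (double) (1.0f / (float) d);  /* x0 = 1/d estimate */
  if (passes > 1)
    {
      double e = 1.0 - d * x;     /* e0 = 1 - d*x0       (fnmsub) */
      x = x + e * x;              /* x1 = x0 + e0*x0     (fmadd)  */
      for (int i = 0; i < passes - 2; i++)
        {
          e = e * e;              /* enext = eprev * eprev        */
          x = x + e * x;          /* xnext = xprev + enext*xprev  */
        }
    }
  double u = n * x;               /* u = n * xprev                */
  double v = n - d * u;           /* v = n - d*u         (fnmsub) */
  return v * x + u;               /* dst = v*xprev + u   (fmadd)  */
}
#endif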
29525 /* Goldschmidt's Algorithm for single/double-precision floating point
29526 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
29528 void
29529 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
29531 machine_mode mode = GET_MODE (src);
29532 rtx e = gen_reg_rtx (mode);
29533 rtx g = gen_reg_rtx (mode);
29534 rtx h = gen_reg_rtx (mode);
29536 /* Low precision estimates guarantee 5 bits of accuracy. High
29537 precision estimates guarantee 14 bits of accuracy. SFmode
29538 requires 23 bits of accuracy. DFmode requires 52 bits of
29539 accuracy. Each pass at least doubles the accuracy, leading
29540 to the following. */
29541 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29542 if (mode == DFmode || mode == V2DFmode)
29543 passes++;
29545 int i;
29546 rtx mhalf;
29547 enum insn_code code = optab_handler (smul_optab, mode);
29548 insn_gen_fn gen_mul = GEN_FCN (code);
29550 gcc_assert (code != CODE_FOR_nothing);
29552 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
29554 /* e = rsqrt estimate */
29555 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
29556 UNSPEC_RSQRT)));
29558 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
29559 if (!recip)
29561 rtx zero = force_reg (mode, CONST0_RTX (mode));
29563 if (mode == SFmode)
29565 rtx target = emit_conditional_move (e, GT, src, zero, mode,
29566 e, zero, mode, 0);
29567 if (target != e)
29568 emit_move_insn (e, target);
29570 else
29572 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
29573 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
29577 /* g = sqrt estimate. */
29578 emit_insn (gen_mul (g, e, src));
29579 /* h = 1/(2*sqrt) estimate. */
29580 emit_insn (gen_mul (h, e, mhalf));
29582 if (recip)
29584 if (passes == 1)
29586 rtx t = gen_reg_rtx (mode);
29587 rs6000_emit_nmsub (t, g, h, mhalf);
29588 /* Apply correction directly to 1/rsqrt estimate. */
29589 rs6000_emit_madd (dst, e, t, e);
29591 else
29593 for (i = 0; i < passes; i++)
29595 rtx t1 = gen_reg_rtx (mode);
29596 rtx g1 = gen_reg_rtx (mode);
29597 rtx h1 = gen_reg_rtx (mode);
29599 rs6000_emit_nmsub (t1, g, h, mhalf);
29600 rs6000_emit_madd (g1, g, t1, g);
29601 rs6000_emit_madd (h1, h, t1, h);
29603 g = g1;
29604 h = h1;
29606 /* Multiply by 2 for 1/rsqrt. */
29607 emit_insn (gen_add3_insn (dst, h, h));
29610 else
29612 rtx t = gen_reg_rtx (mode);
29613 rs6000_emit_nmsub (t, g, h, mhalf);
29614 rs6000_emit_madd (dst, g, t, g);
29617 return;
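/* A standalone C sketch of the Goldschmidt refinement above
   (illustrative only; the float rsqrt stands in for the hardware
   estimate, and the src == 0.0 filtering done above is omitted, so
   this model yields NaN for src == 0.0).  The multi-pass reciprocal
   branch refines G and H as below and returns h + h; the square-root
   branch applies the same T correction to G.  */
#if 0
#include <math.h>

static double
swsqrt_model (double src, int passes, int recip)
{
  double e = (double) (1.0f / sqrtf ((float) src)); /* rsqrt estimate */
  double g = e * src;             /* g = sqrt estimate        */
  double h = e * 0.5;             /* h = 1/(2*sqrt) estimate  */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;     /* fnmsub; tends to 0       */
      g = g + t * g;              /* refine sqrt              */
      h = h + t * h;              /* refine 1/(2*sqrt)        */
    }
  return recip ? h + h : g;
}
#endif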
29620 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
29621 (Power7) targets. DST is the target, and SRC is the argument operand. */
29623 void
29624 rs6000_emit_popcount (rtx dst, rtx src)
29626 machine_mode mode = GET_MODE (dst);
29627 rtx tmp1, tmp2;
29629 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
29630 if (TARGET_POPCNTD)
29632 if (mode == SImode)
29633 emit_insn (gen_popcntdsi2 (dst, src));
29634 else
29635 emit_insn (gen_popcntddi2 (dst, src));
29636 return;
29639 tmp1 = gen_reg_rtx (mode);
29641 if (mode == SImode)
29643 emit_insn (gen_popcntbsi2 (tmp1, src));
29644 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
29645 NULL_RTX, 0);
29646 tmp2 = force_reg (SImode, tmp2);
29647 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
29649 else
29651 emit_insn (gen_popcntbdi2 (tmp1, src));
29652 tmp2 = expand_mult (DImode, tmp1,
29653 GEN_INT ((HOST_WIDE_INT)
29654 0x01010101 << 32 | 0x01010101),
29655 NULL_RTX, 0);
29656 tmp2 = force_reg (DImode, tmp2);
29657 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
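/* A C model of the popcntb-based fallback above (illustrative only).
   byte_counts plays the role of the popcntb instruction: each byte of
   the result holds the population count of the corresponding byte of X.
   Multiplying by 0x01010101 accumulates all byte counts into the top
   byte, which the final shift extracts; no carries can occur because
   each count is at most 8.  */
#if 0
#include <stdint.h>

static uint32_t
byte_counts (uint32_t x)          /* models popcntb, SImode */
{
  uint32_t r = 0;
  for (int b = 0; b < 4; b++)
    {
      uint32_t byte = (x >> (8 * b)) & 0xff, c = 0;
      for (; byte != 0; byte >>= 1)
        c += byte & 1;
      r |= c << (8 * b);
    }
  return r;
}

static uint32_t
popcount_model (uint32_t x)
{
  return (byte_counts (x) * 0x01010101u) >> 24;
}
#endif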
29662 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
29663 target, and SRC is the argument operand. */
29665 void
29666 rs6000_emit_parity (rtx dst, rtx src)
29668 machine_mode mode = GET_MODE (dst);
29669 rtx tmp;
29671 tmp = gen_reg_rtx (mode);
29673 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
29674 if (TARGET_CMPB)
29676 if (mode == SImode)
29678 emit_insn (gen_popcntbsi2 (tmp, src));
29679 emit_insn (gen_paritysi2_cmpb (dst, tmp));
29681 else
29683 emit_insn (gen_popcntbdi2 (tmp, src));
29684 emit_insn (gen_paritydi2_cmpb (dst, tmp));
29686 return;
29689 if (mode == SImode)
29691 /* Is mult+shift >= shift+xor+shift+xor? */
29692 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
29694 rtx tmp1, tmp2, tmp3, tmp4;
29696 tmp1 = gen_reg_rtx (SImode);
29697 emit_insn (gen_popcntbsi2 (tmp1, src));
29699 tmp2 = gen_reg_rtx (SImode);
29700 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
29701 tmp3 = gen_reg_rtx (SImode);
29702 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
29704 tmp4 = gen_reg_rtx (SImode);
29705 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
29706 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
29708 else
29709 rs6000_emit_popcount (tmp, src);
29710 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
29712 else
29714 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
29715 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
29717 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
29719 tmp1 = gen_reg_rtx (DImode);
29720 emit_insn (gen_popcntbdi2 (tmp1, src));
29722 tmp2 = gen_reg_rtx (DImode);
29723 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
29724 tmp3 = gen_reg_rtx (DImode);
29725 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
29727 tmp4 = gen_reg_rtx (DImode);
29728 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
29729 tmp5 = gen_reg_rtx (DImode);
29730 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
29732 tmp6 = gen_reg_rtx (DImode);
29733 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
29734 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
29736 else
29737 rs6000_emit_popcount (tmp, src);
29738 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
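/* A C model of the shift/xor fallback above (illustrative only,
   reusing the hypothetical byte_counts from the popcount sketch).
   Because the parity of a sum equals the xor of the parities, folding
   the popcntb byte counts with xor preserves the low bit, which is
   the parity of the whole word.  */
#if 0
static uint32_t
parity_model (uint32_t x)
{
  uint32_t t = byte_counts (x);   /* per-byte counts (popcntb) */
  t ^= t >> 16;                   /* fold halfwords            */
  t ^= t >> 8;                    /* fold bytes                */
  return t & 1;                   /* low bit = parity          */
}
#endif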
29742 /* Expand an Altivec constant permutation for little endian mode.
29743 OP0 and OP1 are the input vectors and TARGET is the output vector.
29744 SEL specifies the constant permutation vector.
29746 There are two issues: First, the two input operands must be
29747 swapped so that together they form a double-wide array in LE
29748 order. Second, the vperm instruction has surprising behavior
29749 in LE mode: it interprets the elements of the source vectors
29750 in BE mode ("left to right") and interprets the elements of
29751 the destination vector in LE mode ("right to left"). To
29752 correct for this, we must subtract each element of the permute
29753 control vector from 31.
29755 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
29756 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
29757 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
29758 serve as the permute control vector. Then, in BE mode,
29760 vperm 9,10,11,12
29762 places the desired result in vr9. However, in LE mode the
29763 vector contents will be
29765 vr10 = 00000003 00000002 00000001 00000000
29766 vr11 = 00000007 00000006 00000005 00000004
29768 The result of the vperm using the same permute control vector is
29770 vr9 = 05000000 07000000 01000000 03000000
29772 That is, the leftmost 4 bytes of vr10 are interpreted as the
29773 source for the rightmost 4 bytes of vr9, and so on.
29775 If we change the permute control vector to
29777 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
29779 and issue
29781 vperm 9,11,10,12
29783 we get the desired
29785 vr9 = 00000006 00000004 00000002 00000000. */
29787 static void
29788 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
29789 const vec_perm_indices &sel)
29791 unsigned int i;
29792 rtx perm[16];
29793 rtx constv, unspec;
29795 /* Unpack and adjust the constant selector. */
29796 for (i = 0; i < 16; ++i)
29798 unsigned int elt = 31 - (sel[i] & 31);
29799 perm[i] = GEN_INT (elt);
29802 /* Expand to a permute, swapping the inputs and using the
29803 adjusted selector. */
29804 if (!REG_P (op0))
29805 op0 = force_reg (V16QImode, op0);
29806 if (!REG_P (op1))
29807 op1 = force_reg (V16QImode, op1);
29809 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
29810 constv = force_reg (V16QImode, constv);
29811 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
29812 UNSPEC_VPERM);
29813 if (!REG_P (target))
29815 rtx tmp = gen_reg_rtx (V16QImode);
29816 emit_move_insn (tmp, unspec);
29817 unspec = tmp;
29820 emit_move_insn (target, unspec);
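/* A C model of the selector adjustment performed above (illustrative
   only): each element of the BE-oriented permute control vector is
   mirrored within the 32-byte concatenated input, matching the
   "subtract from 31" rule and the vr10/vr11 example in the comment
   before this function; the operand swap happens separately.  */
#if 0
static void
le_adjust_selector (const unsigned char sel[16], unsigned char out[16])
{
  for (int i = 0; i < 16; i++)
    out[i] = 31 - (sel[i] & 31);
}
#endif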
29823 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
29824 permute control vector. But here it's not a constant, so we must
29825 generate a vector NAND or NOR to do the adjustment. */
29827 void
29828 altivec_expand_vec_perm_le (rtx operands[4])
29830 rtx notx, iorx, unspec;
29831 rtx target = operands[0];
29832 rtx op0 = operands[1];
29833 rtx op1 = operands[2];
29834 rtx sel = operands[3];
29835 rtx tmp = target;
29836 rtx norreg = gen_reg_rtx (V16QImode);
29837 machine_mode mode = GET_MODE (target);
29839 /* Get everything in regs so the pattern matches. */
29840 if (!REG_P (op0))
29841 op0 = force_reg (mode, op0);
29842 if (!REG_P (op1))
29843 op1 = force_reg (mode, op1);
29844 if (!REG_P (sel))
29845 sel = force_reg (V16QImode, sel);
29846 if (!REG_P (target))
29847 tmp = gen_reg_rtx (mode);
29849 if (TARGET_P9_VECTOR)
29851 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
29852 UNSPEC_VPERMR);
29854 else
29856 /* Invert the selector with a VNAND if available, else a VNOR.
29857 The VNAND is preferred for future fusion opportunities. */
29858 notx = gen_rtx_NOT (V16QImode, sel);
29859 iorx = (TARGET_P8_VECTOR
29860 ? gen_rtx_IOR (V16QImode, notx, notx)
29861 : gen_rtx_AND (V16QImode, notx, notx));
29862 emit_insn (gen_rtx_SET (norreg, iorx));
29864 /* Permute with operands reversed and adjusted selector. */
29865 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
29866 UNSPEC_VPERM);
29869 /* Copy into target, possibly by way of a register. */
29870 if (!REG_P (target))
29872 emit_move_insn (tmp, unspec);
29873 unspec = tmp;
29876 emit_move_insn (target, unspec);
29879 /* Expand an Altivec constant permutation. Return true if we match
29880 an efficient implementation; false to fall back to VPERM.
29882 OP0 and OP1 are the input vectors and TARGET is the output vector.
29883 SEL specifies the constant permutation vector. */
29885 static bool
29886 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
29887 const vec_perm_indices &sel)
29889 struct altivec_perm_insn {
29890 HOST_WIDE_INT mask;
29891 enum insn_code impl;
29892 unsigned char perm[16];
29894 static const struct altivec_perm_insn patterns[] = {
29895 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
29896 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
29897 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
29898 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
29899 { OPTION_MASK_ALTIVEC,
29900 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
29901 : CODE_FOR_altivec_vmrglb_direct),
29902 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
29903 { OPTION_MASK_ALTIVEC,
29904 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
29905 : CODE_FOR_altivec_vmrglh_direct),
29906 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
29907 { OPTION_MASK_ALTIVEC,
29908 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
29909 : CODE_FOR_altivec_vmrglw_direct),
29910 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
29911 { OPTION_MASK_ALTIVEC,
29912 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
29913 : CODE_FOR_altivec_vmrghb_direct),
29914 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
29915 { OPTION_MASK_ALTIVEC,
29916 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
29917 : CODE_FOR_altivec_vmrghh_direct),
29918 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
29919 { OPTION_MASK_ALTIVEC,
29920 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
29921 : CODE_FOR_altivec_vmrghw_direct),
29922 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
29923 { OPTION_MASK_P8_VECTOR,
29924 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
29925 : CODE_FOR_p8_vmrgow_v4sf_direct),
29926 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
29927 { OPTION_MASK_P8_VECTOR,
29928 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
29929 : CODE_FOR_p8_vmrgew_v4sf_direct),
29930 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
29933 unsigned int i, j, elt, which;
29934 unsigned char perm[16];
29935 rtx x;
29936 bool one_vec;
29938 /* Unpack the constant selector. */
29939 for (i = which = 0; i < 16; ++i)
29941 elt = sel[i] & 31;
29942 which |= (elt < 16 ? 1 : 2);
29943 perm[i] = elt;
29946 /* Simplify the constant selector based on operands. */
29947 switch (which)
29949 default:
29950 gcc_unreachable ();
29952 case 3:
29953 one_vec = false;
29954 if (!rtx_equal_p (op0, op1))
29955 break;
29956 /* FALLTHRU */
29958 case 2:
29959 for (i = 0; i < 16; ++i)
29960 perm[i] &= 15;
29961 op0 = op1;
29962 one_vec = true;
29963 break;
29965 case 1:
29966 op1 = op0;
29967 one_vec = true;
29968 break;
29971 /* Look for splat patterns. */
29972 if (one_vec)
29974 elt = perm[0];
29976 for (i = 0; i < 16; ++i)
29977 if (perm[i] != elt)
29978 break;
29979 if (i == 16)
29981 if (!BYTES_BIG_ENDIAN)
29982 elt = 15 - elt;
29983 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
29984 return true;
29987 if (elt % 2 == 0)
29989 for (i = 0; i < 16; i += 2)
29990 if (perm[i] != elt || perm[i + 1] != elt + 1)
29991 break;
29992 if (i == 16)
29994 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
29995 x = gen_reg_rtx (V8HImode);
29996 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
29997 GEN_INT (field)));
29998 emit_move_insn (target, gen_lowpart (V16QImode, x));
29999 return true;
30003 if (elt % 4 == 0)
30005 for (i = 0; i < 16; i += 4)
30006 if (perm[i] != elt
30007 || perm[i + 1] != elt + 1
30008 || perm[i + 2] != elt + 2
30009 || perm[i + 3] != elt + 3)
30010 break;
30011 if (i == 16)
30013 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
30014 x = gen_reg_rtx (V4SImode);
30015 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
30016 GEN_INT (field)));
30017 emit_move_insn (target, gen_lowpart (V16QImode, x));
30018 return true;
30023 /* Look for merge and pack patterns. */
30024 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
30026 bool swapped;
30028 if ((patterns[j].mask & rs6000_isa_flags) == 0)
30029 continue;
30031 elt = patterns[j].perm[0];
30032 if (perm[0] == elt)
30033 swapped = false;
30034 else if (perm[0] == elt + 16)
30035 swapped = true;
30036 else
30037 continue;
30038 for (i = 1; i < 16; ++i)
30040 elt = patterns[j].perm[i];
30041 if (swapped)
30042 elt = (elt >= 16 ? elt - 16 : elt + 16);
30043 else if (one_vec && elt >= 16)
30044 elt -= 16;
30045 if (perm[i] != elt)
30046 break;
30048 if (i == 16)
30050 enum insn_code icode = patterns[j].impl;
30051 machine_mode omode = insn_data[icode].operand[0].mode;
30052 machine_mode imode = insn_data[icode].operand[1].mode;
30054 /* For little-endian, don't use vpkuwum and vpkuhum if the
30055 underlying vector type is not V4SI and V8HI, respectively.
30056 For example, using vpkuwum with a V8HI picks up the even
30057 halfwords (BE numbering) when the even halfwords (LE
30058 numbering) are what we need. */
30059 if (!BYTES_BIG_ENDIAN
30060 && icode == CODE_FOR_altivec_vpkuwum_direct
30061 && ((REG_P (op0)
30062 && GET_MODE (op0) != V4SImode)
30063 || (SUBREG_P (op0)
30064 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
30065 continue;
30066 if (!BYTES_BIG_ENDIAN
30067 && icode == CODE_FOR_altivec_vpkuhum_direct
30068 && ((REG_P (op0)
30069 && GET_MODE (op0) != V8HImode)
30070 || (SUBREG_P (op0)
30071 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
30072 continue;
30074 /* For little-endian, the two input operands must be swapped
30075 (or swapped back) to ensure proper right-to-left numbering
30076 from 0 to 2N-1. */
30077 if (swapped ^ !BYTES_BIG_ENDIAN)
30078 std::swap (op0, op1);
30079 if (imode != V16QImode)
30081 op0 = gen_lowpart (imode, op0);
30082 op1 = gen_lowpart (imode, op1);
30084 if (omode == V16QImode)
30085 x = target;
30086 else
30087 x = gen_reg_rtx (omode);
30088 emit_insn (GEN_FCN (icode) (x, op0, op1));
30089 if (omode != V16QImode)
30090 emit_move_insn (target, gen_lowpart (V16QImode, x));
30091 return true;
30095 if (!BYTES_BIG_ENDIAN)
30097 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
30098 return true;
30101 return false;
30104 /* Expand a VSX Permute Doubleword constant permutation.
30105 Return true if we match an efficient implementation. */
30107 static bool
30108 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
30109 unsigned char perm0, unsigned char perm1)
30111 rtx x;
30113 /* If both selectors come from the same operand, fold to single op. */
30114 if ((perm0 & 2) == (perm1 & 2))
30116 if (perm0 & 2)
30117 op0 = op1;
30118 else
30119 op1 = op0;
30121 /* If both operands are equal, fold to simpler permutation. */
30122 if (rtx_equal_p (op0, op1))
30124 perm0 = perm0 & 1;
30125 perm1 = (perm1 & 1) + 2;
30127 /* If the first selector comes from the second operand, swap. */
30128 else if (perm0 & 2)
30130 if (perm1 & 2)
30131 return false;
30132 perm0 -= 2;
30133 perm1 += 2;
30134 std::swap (op0, op1);
30136 /* If the second selector does not come from the second operand, fail. */
30137 else if ((perm1 & 2) == 0)
30138 return false;
30140 /* Success! */
30141 if (target != NULL)
30143 machine_mode vmode, dmode;
30144 rtvec v;
30146 vmode = GET_MODE (target);
30147 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
30148 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
30149 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
30150 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
30151 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
30152 emit_insn (gen_rtx_SET (target, x));
30154 return true;
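/* A C model of the two-element permute handled above (illustrative
   only).  Each selector is a value 0..3 indexing the four doublewords
   of op0:op1; bit 1 picks the operand (the "perm & 2" tests above)
   and bit 0 picks the element within it.  */
#if 0
static void
vec_perm2_model (const double op0[2], const double op1[2],
                 unsigned perm0, unsigned perm1, double out[2])
{
  const double *src[2] = { op0, op1 };
  out[0] = src[(perm0 >> 1) & 1][perm0 & 1];
  out[1] = src[(perm1 >> 1) & 1][perm1 & 1];
}
#endif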
30157 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
30159 static bool
30160 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
30161 rtx op1, const vec_perm_indices &sel)
30163 bool testing_p = !target;
30165 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
30166 if (TARGET_ALTIVEC && testing_p)
30167 return true;
30169 /* Check for ps_merge* or xxpermdi insns. */
30170 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
30172 if (testing_p)
30174 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
30175 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
30177 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
30178 return true;
30181 if (TARGET_ALTIVEC)
30183 /* Force the target-independent code to lower to V16QImode. */
30184 if (vmode != V16QImode)
30185 return false;
30186 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
30187 return true;
30190 return false;
30193 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
30194 OP0 and OP1 are the input vectors and TARGET is the output vector.
30195 PERM specifies the constant permutation vector. */
30197 static void
30198 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
30199 machine_mode vmode, const vec_perm_builder &perm)
30201 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
30202 if (x != target)
30203 emit_move_insn (target, x);
30206 /* Expand an extract even operation. */
30208 void
30209 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
30211 machine_mode vmode = GET_MODE (target);
30212 unsigned i, nelt = GET_MODE_NUNITS (vmode);
30213 vec_perm_builder perm (nelt, nelt, 1);
30215 for (i = 0; i < nelt; i++)
30216 perm.quick_push (i * 2);
30218 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
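/* A C model of the selector built above (illustrative only): element I
   of the result is element 2*I of the concatenation op0:op1, i.e. the
   even-numbered elements of the two inputs in order.  */
#if 0
static void
extract_even_model (const int *op0, const int *op1, int nelt, int *out)
{
  for (int i = 0; i < nelt; i++)
    out[i] = (2 * i < nelt) ? op0[2 * i] : op1[2 * i - nelt];
}
#endif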
30221 /* Expand a vector interleave operation. */
30223 void
30224 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
30226 machine_mode vmode = GET_MODE (target);
30227 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
30228 vec_perm_builder perm (nelt, nelt, 1);
30230 high = (highp ? 0 : nelt / 2);
30231 for (i = 0; i < nelt / 2; i++)
30233 perm.quick_push (i + high);
30234 perm.quick_push (i + nelt + high);
30237 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
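/* A C model of the interleave selector above (illustrative only):
   HIGHP chooses the high halves (offset 0) or the low halves (offset
   nelt/2) of the two inputs, whose elements are then alternated.  */
#if 0
static void
interleave_model (const int *op0, const int *op1, int nelt, int highp,
                  int *out)
{
  int base = highp ? 0 : nelt / 2;
  for (int i = 0; i < nelt / 2; i++)
    {
      out[2 * i] = op0[base + i];
      out[2 * i + 1] = op1[base + i];
    }
}
#endif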
30240 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
30241 void
30242 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
30244 HOST_WIDE_INT hwi_scale (scale);
30245 REAL_VALUE_TYPE r_pow;
30246 rtvec v = rtvec_alloc (2);
30247 rtx elt;
30248 rtx scale_vec = gen_reg_rtx (V2DFmode);
30249 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
30250 elt = const_double_from_real_value (r_pow, DFmode);
30251 RTVEC_ELT (v, 0) = elt;
30252 RTVEC_ELT (v, 1) = elt;
30253 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
30254 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
30257 /* Return an RTX representing where to find the function value of a
30258 function returning MODE. */
30259 static rtx
30260 rs6000_complex_function_value (machine_mode mode)
30262 unsigned int regno;
30263 rtx r1, r2;
30264 machine_mode inner = GET_MODE_INNER (mode);
30265 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
30267 if (TARGET_FLOAT128_TYPE
30268 && (mode == KCmode
30269 || (mode == TCmode && TARGET_IEEEQUAD)))
30270 regno = ALTIVEC_ARG_RETURN;
30272 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30273 regno = FP_ARG_RETURN;
30275 else
30277 regno = GP_ARG_RETURN;
30279 /* 32-bit is OK since it'll go in r3/r4. */
30280 if (TARGET_32BIT && inner_bytes >= 4)
30281 return gen_rtx_REG (mode, regno);
30284 if (inner_bytes >= 8)
30285 return gen_rtx_REG (mode, regno);
30287 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
30288 const0_rtx);
30289 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
30290 GEN_INT (inner_bytes));
30291 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
30294 /* Return an rtx describing a return value of MODE as a PARALLEL
30295 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
30296 stride REG_STRIDE. */
30298 static rtx
30299 rs6000_parallel_return (machine_mode mode,
30300 int n_elts, machine_mode elt_mode,
30301 unsigned int regno, unsigned int reg_stride)
30303 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
30305 int i;
30306 for (i = 0; i < n_elts; i++)
30308 rtx r = gen_rtx_REG (elt_mode, regno);
30309 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
30310 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
30311 regno += reg_stride;
30314 return par;
30317 /* Target hook for TARGET_FUNCTION_VALUE.
30319 An integer value is in r3 and a floating-point value is in fp1,
30320 unless -msoft-float. */
30322 static rtx
30323 rs6000_function_value (const_tree valtype,
30324 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
30325 bool outgoing ATTRIBUTE_UNUSED)
30327 machine_mode mode;
30328 unsigned int regno;
30329 machine_mode elt_mode;
30330 int n_elts;
30332 /* Special handling for structs in darwin64. */
30333 if (TARGET_MACHO
30334 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
30336 CUMULATIVE_ARGS valcum;
30337 rtx valret;
30339 valcum.words = 0;
30340 valcum.fregno = FP_ARG_MIN_REG;
30341 valcum.vregno = ALTIVEC_ARG_MIN_REG;
30342 /* Do a trial code generation as if this were going to be passed as
30343 an argument; if any part goes in memory, we return NULL. */
30344 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
30345 if (valret)
30346 return valret;
30347 /* Otherwise fall through to standard ABI rules. */
30350 mode = TYPE_MODE (valtype);
30352 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
30353 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
30355 int first_reg, n_regs;
30357 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
30359 /* _Decimal128 must use even/odd register pairs. */
30360 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30361 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
30363 else
30365 first_reg = ALTIVEC_ARG_RETURN;
30366 n_regs = 1;
30369 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
30372 /* Some return value types need to be split for the 32-bit ABI with -mpowerpc64. */
30373 if (TARGET_32BIT && TARGET_POWERPC64)
30374 switch (mode)
30376 default:
30377 break;
30378 case E_DImode:
30379 case E_SCmode:
30380 case E_DCmode:
30381 case E_TCmode:
30382 int count = GET_MODE_SIZE (mode) / 4;
30383 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
30386 if ((INTEGRAL_TYPE_P (valtype)
30387 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
30388 || POINTER_TYPE_P (valtype))
30389 mode = TARGET_32BIT ? SImode : DImode;
30391 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30392 /* _Decimal128 must use an even/odd register pair. */
30393 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30394 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
30395 && !FLOAT128_VECTOR_P (mode))
30396 regno = FP_ARG_RETURN;
30397 else if (TREE_CODE (valtype) == COMPLEX_TYPE
30398 && targetm.calls.split_complex_arg)
30399 return rs6000_complex_function_value (mode);
30400 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30401 return register is used in both cases, and we won't see V2DImode/V2DFmode
30402 for pure altivec, combine the two cases. */
30403 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
30404 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
30405 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
30406 regno = ALTIVEC_ARG_RETURN;
30407 else
30408 regno = GP_ARG_RETURN;
30410 return gen_rtx_REG (mode, regno);
30413 /* Define how to find the value returned by a library function
30414 assuming the value has mode MODE. */
30415 static rtx
30416 rs6000_libcall_value (machine_mode mode)
30418 unsigned int regno;
30420 /* A long long return value needs to be split for the 32-bit ABI with -mpowerpc64. */
30421 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
30422 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
30424 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30425 /* _Decimal128 must use an even/odd register pair. */
30426 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30427 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
30428 regno = FP_ARG_RETURN;
30429 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30430 return register is used in both cases, and we won't see V2DImode/V2DFmode
30431 for pure altivec, combine the two cases. */
30432 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
30433 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
30434 regno = ALTIVEC_ARG_RETURN;
30435 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
30436 return rs6000_complex_function_value (mode);
30437 else
30438 regno = GP_ARG_RETURN;
30440 return gen_rtx_REG (mode, regno);
30443 /* Compute register pressure classes. We implement the target hook to avoid
30444 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
30445 lead to incorrect estimates of the number of available registers and therefore
30446 increased register pressure/spill. */
30447 static int
30448 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
30450 int n;
30452 n = 0;
30453 pressure_classes[n++] = GENERAL_REGS;
30454 if (TARGET_VSX)
30455 pressure_classes[n++] = VSX_REGS;
30456 else
30458 if (TARGET_ALTIVEC)
30459 pressure_classes[n++] = ALTIVEC_REGS;
30460 if (TARGET_HARD_FLOAT)
30461 pressure_classes[n++] = FLOAT_REGS;
30463 pressure_classes[n++] = CR_REGS;
30464 pressure_classes[n++] = SPECIAL_REGS;
30466 return n;
30469 /* Given FROM and TO register numbers, say whether this elimination is allowed.
30470 Frame pointer elimination is automatically handled.
30472 For the RS/6000, if frame pointer elimination is being done, we would like
30473 to convert ap into fp, not sp.
30475 We need r30 if -mminimal-toc was specified, and there are constant pool
30476 references. */
30478 static bool
30479 rs6000_can_eliminate (const int from, const int to)
30481 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
30482 ? ! frame_pointer_needed
30483 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
30484 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC_OR_PCREL
30485 || constant_pool_empty_p ()
30486 : true);
30489 /* Define the offset between two registers, FROM to be eliminated and its
30490 replacement TO, at the start of a routine. */
30491 HOST_WIDE_INT
30492 rs6000_initial_elimination_offset (int from, int to)
30494 rs6000_stack_t *info = rs6000_stack_info ();
30495 HOST_WIDE_INT offset;
30497 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30498 offset = info->push_p ? 0 : -info->total_size;
30499 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30501 offset = info->push_p ? 0 : -info->total_size;
30502 if (FRAME_GROWS_DOWNWARD)
30503 offset += info->fixed_size + info->vars_size + info->parm_size;
30505 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30506 offset = FRAME_GROWS_DOWNWARD
30507 ? info->fixed_size + info->vars_size + info->parm_size
30508 : 0;
30509 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30510 offset = info->total_size;
30511 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30512 offset = info->push_p ? info->total_size : 0;
30513 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
30514 offset = 0;
30515 else
30516 gcc_unreachable ();
30518 return offset;
30521 /* Fill in the sizes of the registers used by the unwinder. */
30523 static void
30524 rs6000_init_dwarf_reg_sizes_extra (tree address)
30526 if (TARGET_MACHO && ! TARGET_ALTIVEC)
30528 int i;
30529 machine_mode mode = TYPE_MODE (char_type_node);
30530 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
30531 rtx mem = gen_rtx_MEM (BLKmode, addr);
30532 rtx value = gen_int_mode (16, mode);
30534 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
30535 The unwinder still needs to know the size of Altivec registers. */
30537 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
30539 int column = DWARF_REG_TO_UNWIND_COLUMN
30540 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
30541 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
30543 emit_move_insn (adjust_address (mem, mode, offset), value);
30548 /* Map internal gcc register numbers to debug format register numbers.
30549 FORMAT specifies the type of debug register number to use:
30550 0 -- debug information, except for frame-related sections
30551 1 -- DWARF .debug_frame section
30552 2 -- DWARF .eh_frame section */
30554 unsigned int
30555 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
30557 /* On some platforms, we use the standard DWARF register
30558 numbering for .debug_info and .debug_frame. */
30559 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
30561 #ifdef RS6000_USE_DWARF_NUMBERING
30562 if (regno <= 31)
30563 return regno;
30564 if (FP_REGNO_P (regno))
30565 return regno - FIRST_FPR_REGNO + 32;
30566 if (ALTIVEC_REGNO_P (regno))
30567 return regno - FIRST_ALTIVEC_REGNO + 1124;
30568 if (regno == LR_REGNO)
30569 return 108;
30570 if (regno == CTR_REGNO)
30571 return 109;
30572 if (regno == CA_REGNO)
30573 return 101; /* XER */
30574 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
30575 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
30576 The actual code emitted saves the whole of CR, so we map CR2_REGNO
30577 to the DWARF reg for CR. */
30578 if (format == 1 && regno == CR2_REGNO)
30579 return 64;
30580 if (CR_REGNO_P (regno))
30581 return regno - CR0_REGNO + 86;
30582 if (regno == VRSAVE_REGNO)
30583 return 356;
30584 if (regno == VSCR_REGNO)
30585 return 67;
30587 /* These do not make much sense. */
30588 if (regno == FRAME_POINTER_REGNUM)
30589 return 111;
30590 if (regno == ARG_POINTER_REGNUM)
30591 return 67;
30592 if (regno == 64)
30593 return 100;
30595 gcc_unreachable ();
30596 #endif
30599 /* We use the GCC 7 (and before) internal number for non-DWARF debug
30600 information, and also for .eh_frame. */
30601 /* Translate the regnos to their numbers in GCC 7 (and before). */
30602 if (regno <= 31)
30603 return regno;
30604 if (FP_REGNO_P (regno))
30605 return regno - FIRST_FPR_REGNO + 32;
30606 if (ALTIVEC_REGNO_P (regno))
30607 return regno - FIRST_ALTIVEC_REGNO + 77;
30608 if (regno == LR_REGNO)
30609 return 65;
30610 if (regno == CTR_REGNO)
30611 return 66;
30612 if (regno == CA_REGNO)
30613 return 76; /* XER */
30614 if (CR_REGNO_P (regno))
30615 return regno - CR0_REGNO + 68;
30616 if (regno == VRSAVE_REGNO)
30617 return 109;
30618 if (regno == VSCR_REGNO)
30619 return 110;
30621 if (regno == FRAME_POINTER_REGNUM)
30622 return 111;
30623 if (regno == ARG_POINTER_REGNUM)
30624 return 67;
30625 if (regno == 64)
30626 return 64;
30628 gcc_unreachable ();
30631 /* Target hook for eh_return_filter_mode. */
30632 static scalar_int_mode
30633 rs6000_eh_return_filter_mode (void)
30635 return TARGET_32BIT ? SImode : word_mode;
30638 /* Target hook for translate_mode_attribute. */
30639 static machine_mode
30640 rs6000_translate_mode_attribute (machine_mode mode)
30642 if ((FLOAT128_IEEE_P (mode)
30643 && ieee128_float_type_node == long_double_type_node)
30644 || (FLOAT128_IBM_P (mode)
30645 && ibm128_float_type_node == long_double_type_node))
30646 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
30647 return mode;
30650 /* Target hook for scalar_mode_supported_p. */
30651 static bool
30652 rs6000_scalar_mode_supported_p (scalar_mode mode)
30654 /* -m32 does not support TImode. This is the default, from
30655 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
30656 same ABI as for -m32. But default_scalar_mode_supported_p allows
30657 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
30658 for -mpowerpc64. */
30659 if (TARGET_32BIT && mode == TImode)
30660 return false;
30662 if (DECIMAL_FLOAT_MODE_P (mode))
30663 return default_decimal_float_supported_p ();
30664 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
30665 return true;
30666 else
30667 return default_scalar_mode_supported_p (mode);
30670 /* Target hook for vector_mode_supported_p. */
30671 static bool
30672 rs6000_vector_mode_supported_p (machine_mode mode)
30674 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
30675 128-bit, the compiler might try to widen IEEE 128-bit to IBM
30676 double-double. */
30677 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
30678 return true;
30680 else
30681 return false;
30684 /* Target hook for floatn_mode. */
30685 static opt_scalar_float_mode
30686 rs6000_floatn_mode (int n, bool extended)
30688 if (extended)
30690 switch (n)
30692 case 32:
30693 return DFmode;
30695 case 64:
30696 if (TARGET_FLOAT128_TYPE)
30697 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30698 else
30699 return opt_scalar_float_mode ();
30701 case 128:
30702 return opt_scalar_float_mode ();
30704 default:
30705 /* Those are the only valid _FloatNx types. */
30706 gcc_unreachable ();
30709 else
30711 switch (n)
30713 case 32:
30714 return SFmode;
30716 case 64:
30717 return DFmode;
30719 case 128:
30720 if (TARGET_FLOAT128_TYPE)
30721 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30722 else
30723 return opt_scalar_float_mode ();
30725 default:
30726 return opt_scalar_float_mode ();
30732 /* Target hook for c_mode_for_suffix. */
30733 static machine_mode
30734 rs6000_c_mode_for_suffix (char suffix)
30736 if (TARGET_FLOAT128_TYPE)
30738 if (suffix == 'q' || suffix == 'Q')
30739 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30741 /* At the moment, we are not defining a suffix for IBM extended double.
30742 If/when the default for -mabi=ieeelongdouble is changed, and we want
30743 to support __ibm128 constants in legacy library code, we may need to
30744 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
30745 'q' as machine-dependent suffixes. The x86_64 port uses 'w' for
30746 __float80 constants. */
30749 return VOIDmode;
30752 /* Target hook for invalid_arg_for_unprototyped_fn. */
30753 static const char *
30754 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
30756 return (!rs6000_darwin64_abi
30757 && typelist == 0
30758 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
30759 && (funcdecl == NULL_TREE
30760 || (TREE_CODE (funcdecl) == FUNCTION_DECL
30761 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
30762 ? N_("AltiVec argument passed to unprototyped function")
30763 : NULL;
30766 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
30767 setup by calling the hidden function __stack_chk_fail_local instead
30768 of calling __stack_chk_fail directly. Otherwise it is better to call
30769 __stack_chk_fail directly. */
30771 static tree ATTRIBUTE_UNUSED
30772 rs6000_stack_protect_fail (void)
30774 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
30775 ? default_hidden_stack_protect_fail ()
30776 : default_external_stack_protect_fail ();
30779 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
30781 #if TARGET_ELF
30782 static unsigned HOST_WIDE_INT
30783 rs6000_asan_shadow_offset (void)
30785 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
30787 #endif
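/* For reference (assuming ASan's default shadow scale of 3): the offset
   above means the shadow byte for an address is found at
     shadow = (addr >> 3) + (1 << 41)   for 64-bit ELF targets,
     shadow = (addr >> 3) + (1 << 29)   for 32-bit ELF targets.  */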
30789 /* Mask options that we want to support inside of attribute((target)) and
30790 #pragma GCC target operations. Note, we do not include things like
30791 64/32-bit, endianness, hard/soft floating point, etc. that would have
30792 different calling sequences. */
30794 struct rs6000_opt_mask {
30795 const char *name; /* option name */
30796 HOST_WIDE_INT mask; /* mask to set */
30797 bool invert; /* invert sense of mask */
30798 bool valid_target; /* option is a target option */
30801 static struct rs6000_opt_mask const rs6000_opt_masks[] =
30803 { "altivec", OPTION_MASK_ALTIVEC, false, true },
30804 { "cmpb", OPTION_MASK_CMPB, false, true },
30805 { "crypto", OPTION_MASK_CRYPTO, false, true },
30806 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
30807 { "dlmzb", OPTION_MASK_DLMZB, false, true },
30808 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
30809 false, true },
30810 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
30811 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
30812 { "fprnd", OPTION_MASK_FPRND, false, true },
30813 { "future", OPTION_MASK_FUTURE, false, true },
30814 { "hard-dfp", OPTION_MASK_DFP, false, true },
30815 { "htm", OPTION_MASK_HTM, false, true },
30816 { "isel", OPTION_MASK_ISEL, false, true },
30817 { "mfcrf", OPTION_MASK_MFCRF, false, true },
30818 { "mfpgpr", 0, false, true },
30819 { "modulo", OPTION_MASK_MODULO, false, true },
30820 { "mulhw", OPTION_MASK_MULHW, false, true },
30821 { "multiple", OPTION_MASK_MULTIPLE, false, true },
30822 { "pcrel", OPTION_MASK_PCREL, false, true },
30823 { "popcntb", OPTION_MASK_POPCNTB, false, true },
30824 { "popcntd", OPTION_MASK_POPCNTD, false, true },
30825 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
30826 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
30827 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
30828 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
30829 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
30830 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
30831 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
30832 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
30833 { "prefixed-addr", OPTION_MASK_PREFIXED_ADDR, false, true },
30834 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
30835 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
30836 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
30837 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
30838 { "string", 0, false, true },
30839 { "update", OPTION_MASK_NO_UPDATE, true , true },
30840 { "vsx", OPTION_MASK_VSX, false, true },
30841 #ifdef OPTION_MASK_64BIT
30842 #if TARGET_AIX_OS
30843 { "aix64", OPTION_MASK_64BIT, false, false },
30844 { "aix32", OPTION_MASK_64BIT, true, false },
30845 #else
30846 { "64", OPTION_MASK_64BIT, false, false },
30847 { "32", OPTION_MASK_64BIT, true, false },
30848 #endif
30849 #endif
30850 #ifdef OPTION_MASK_EABI
30851 { "eabi", OPTION_MASK_EABI, false, false },
30852 #endif
30853 #ifdef OPTION_MASK_LITTLE_ENDIAN
30854 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
30855 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
30856 #endif
30857 #ifdef OPTION_MASK_RELOCATABLE
30858 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
30859 #endif
30860 #ifdef OPTION_MASK_STRICT_ALIGN
30861 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
30862 #endif
30863 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
30864 { "string", 0, false, false },
30867 /* Builtin mask mapping for printing the flags. */
30868 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
30870 { "altivec", RS6000_BTM_ALTIVEC, false, false },
30871 { "vsx", RS6000_BTM_VSX, false, false },
30872 { "fre", RS6000_BTM_FRE, false, false },
30873 { "fres", RS6000_BTM_FRES, false, false },
30874 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
30875 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
30876 { "popcntd", RS6000_BTM_POPCNTD, false, false },
30877 { "cell", RS6000_BTM_CELL, false, false },
30878 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
30879 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
30880 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
30881 { "crypto", RS6000_BTM_CRYPTO, false, false },
30882 { "htm", RS6000_BTM_HTM, false, false },
30883 { "hard-dfp", RS6000_BTM_DFP, false, false },
30884 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
30885 { "long-double-128", RS6000_BTM_LDBL128, false, false },
30886 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
30887 { "float128", RS6000_BTM_FLOAT128, false, false },
30888 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
30891 /* Option variables that we want to support inside attribute((target)) and
30892 #pragma GCC target operations. */
30894 struct rs6000_opt_var {
30895 const char *name; /* option name */
30896 size_t global_offset; /* offset of the option in global_options. */
30897 size_t target_offset; /* offset of the option in target options. */
30900 static struct rs6000_opt_var const rs6000_opt_vars[] =
30902 { "friz",
30903 offsetof (struct gcc_options, x_TARGET_FRIZ),
30904 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
30905 { "avoid-indexed-addresses",
30906 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
30907 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
30908 { "longcall",
30909 offsetof (struct gcc_options, x_rs6000_default_long_calls),
30910 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
30911 { "optimize-swaps",
30912 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
30913 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
30914 { "allow-movmisalign",
30915 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
30916 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
30917 { "sched-groups",
30918 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
30919 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
30920 { "always-hint",
30921 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
30922 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
30923 { "align-branch-targets",
30924 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
30925 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
30926 { "tls-markers",
30927 offsetof (struct gcc_options, x_tls_markers),
30928 offsetof (struct cl_target_option, x_tls_markers), },
30929 { "sched-prolog",
30930 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
30931 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
30932 { "sched-epilog",
30933 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
30934 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
30935 { "speculate-indirect-jumps",
30936 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
30937 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
30940 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
30941 parsing. Return true if there were no errors. */
30943 static bool
30944 rs6000_inner_target_options (tree args, bool attr_p)
30946 bool ret = true;
30948 if (args == NULL_TREE)
30951 else if (TREE_CODE (args) == STRING_CST)
30953 char *p = ASTRDUP (TREE_STRING_POINTER (args));
30954 char *q;
30956 while ((q = strtok (p, ",")) != NULL)
30958 bool error_p = false;
30959 bool not_valid_p = false;
30960 const char *cpu_opt = NULL;
30962 p = NULL;
30963 if (strncmp (q, "cpu=", 4) == 0)
30965 int cpu_index = rs6000_cpu_name_lookup (q+4);
30966 if (cpu_index >= 0)
30967 rs6000_cpu_index = cpu_index;
30968 else
30970 error_p = true;
30971 cpu_opt = q+4;
30974 else if (strncmp (q, "tune=", 5) == 0)
30976 int tune_index = rs6000_cpu_name_lookup (q+5);
30977 if (tune_index >= 0)
30978 rs6000_tune_index = tune_index;
30979 else
30981 error_p = true;
30982 cpu_opt = q+5;
30985 else
30987 size_t i;
30988 bool invert = false;
30989 char *r = q;
30991 error_p = true;
30992 if (strncmp (r, "no-", 3) == 0)
30994 invert = true;
30995 r += 3;
30998 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
30999 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31001 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31003 if (!rs6000_opt_masks[i].valid_target)
31004 not_valid_p = true;
31005 else
31007 error_p = false;
31008 rs6000_isa_flags_explicit |= mask;
31010 /* VSX needs altivec, so -mvsx automagically sets
31011 altivec and disables -mavoid-indexed-addresses. */
31012 if (!invert)
31014 if (mask == OPTION_MASK_VSX)
31016 mask |= OPTION_MASK_ALTIVEC;
31017 TARGET_AVOID_XFORM = 0;
31021 if (rs6000_opt_masks[i].invert)
31022 invert = !invert;
31024 if (invert)
31025 rs6000_isa_flags &= ~mask;
31026 else
31027 rs6000_isa_flags |= mask;
31029 break;
31032 if (error_p && !not_valid_p)
31034 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31035 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31037 size_t j = rs6000_opt_vars[i].global_offset;
31038 *((int *) ((char *)&global_options + j)) = !invert;
31039 error_p = false;
31040 not_valid_p = false;
31041 break;
31046 if (error_p)
31048 const char *eprefix, *esuffix;
31050 ret = false;
31051 if (attr_p)
31053 eprefix = "__attribute__((__target__(";
31054 esuffix = ")))";
31056 else
31058 eprefix = "#pragma GCC target ";
31059 esuffix = "";
31062 if (cpu_opt)
31063 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
31064 q, esuffix);
31065 else if (not_valid_p)
31066 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
31067 else
31068 error ("%s%qs%s is invalid", eprefix, q, esuffix);
31073 else if (TREE_CODE (args) == TREE_LIST)
31077 tree value = TREE_VALUE (args);
31078 if (value)
31080 bool ret2 = rs6000_inner_target_options (value, attr_p);
31081 if (!ret2)
31082 ret = false;
31084 args = TREE_CHAIN (args);
31086 while (args != NULL_TREE);
31089 else
31091 error ("attribute %<target%> argument not a string");
31092 return false;
31095 return ret;
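/* Usage sketch (hypothetical user code, not part of this file): the
   strings parsed above come from constructs such as

     #pragma GCC target ("cpu=power9,vsx,no-crypto")

     __attribute__ ((target ("popcntd,tune=power8")))
     long count_bits (unsigned long x);

   Each comma-separated token is matched against rs6000_opt_masks and
   rs6000_opt_vars, with a leading "no-" inverting the option.  */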
31098 /* Print out the target options as a list for -mdebug=target. */
31100 static void
31101 rs6000_debug_target_options (tree args, const char *prefix)
31103 if (args == NULL_TREE)
31104 fprintf (stderr, "%s<NULL>", prefix);
31106 else if (TREE_CODE (args) == STRING_CST)
31108 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31109 char *q;
31111 while ((q = strtok (p, ",")) != NULL)
31113 p = NULL;
31114 fprintf (stderr, "%s\"%s\"", prefix, q);
31115 prefix = ", ";
31119 else if (TREE_CODE (args) == TREE_LIST)
31123 tree value = TREE_VALUE (args);
31124 if (value)
31126 rs6000_debug_target_options (value, prefix);
31127 prefix = ", ";
31129 args = TREE_CHAIN (args);
31131 while (args != NULL_TREE);
31134 else
31135 gcc_unreachable ();
31137 return;
31141 /* Hook to validate attribute((target("..."))). */
31143 static bool
31144 rs6000_valid_attribute_p (tree fndecl,
31145 tree ARG_UNUSED (name),
31146 tree args,
31147 int flags)
31149 struct cl_target_option cur_target;
31150 bool ret;
31151 tree old_optimize;
31152 tree new_target, new_optimize;
31153 tree func_optimize;
31155 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
31157 if (TARGET_DEBUG_TARGET)
31159 tree tname = DECL_NAME (fndecl);
31160 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
31161 if (tname)
31162 fprintf (stderr, "function: %.*s\n",
31163 (int) IDENTIFIER_LENGTH (tname),
31164 IDENTIFIER_POINTER (tname));
31165 else
31166 fprintf (stderr, "function: unknown\n");
31168 fprintf (stderr, "args:");
31169 rs6000_debug_target_options (args, " ");
31170 fprintf (stderr, "\n");
31172 if (flags)
31173 fprintf (stderr, "flags: 0x%x\n", flags);
31175 fprintf (stderr, "--------------------\n");
31178 /* attribute((target("default"))) does nothing, beyond
31179 affecting multi-versioning. */
31180 if (TREE_VALUE (args)
31181 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
31182 && TREE_CHAIN (args) == NULL_TREE
31183 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
31184 return true;
31186 old_optimize = build_optimization_node (&global_options);
31187 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31189 /* If the function changed the optimization levels as well as setting target
31190 options, start with the optimizations specified. */
31191 if (func_optimize && func_optimize != old_optimize)
31192 cl_optimization_restore (&global_options,
31193 TREE_OPTIMIZATION (func_optimize));
31195 /* The target attributes may also change some optimization flags, so update
31196 the optimization options if necessary. */
31197 cl_target_option_save (&cur_target, &global_options);
31198 rs6000_cpu_index = rs6000_tune_index = -1;
31199 ret = rs6000_inner_target_options (args, true);
31201 /* Set up any additional state. */
31202 if (ret)
31204 ret = rs6000_option_override_internal (false);
31205 new_target = build_target_option_node (&global_options);
31207 else
31208 new_target = NULL;
31210 new_optimize = build_optimization_node (&global_options);
31212 if (!new_target)
31213 ret = false;
31215 else if (fndecl)
31217 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
31219 if (old_optimize != new_optimize)
31220 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
31223 cl_target_option_restore (&global_options, &cur_target);
31225 if (old_optimize != new_optimize)
31226 cl_optimization_restore (&global_options,
31227 TREE_OPTIMIZATION (old_optimize));
31229 return ret;
31233 /* Hook to validate the current #pragma GCC target and set the state, and
31234 update the macros based on what was changed. If ARGS is NULL, then
31235 POP_TARGET is used to reset the options. */
31237 bool
31238 rs6000_pragma_target_parse (tree args, tree pop_target)
31240 tree prev_tree = build_target_option_node (&global_options);
31241 tree cur_tree;
31242 struct cl_target_option *prev_opt, *cur_opt;
31243 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
31244 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
31246 if (TARGET_DEBUG_TARGET)
31248 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
31249 fprintf (stderr, "args:");
31250 rs6000_debug_target_options (args, " ");
31251 fprintf (stderr, "\n");
31253 if (pop_target)
31255 fprintf (stderr, "pop_target:\n");
31256 debug_tree (pop_target);
31258 else
31259 fprintf (stderr, "pop_target: <NULL>\n");
31261 fprintf (stderr, "--------------------\n");
31264 if (! args)
31266 cur_tree = ((pop_target)
31267 ? pop_target
31268 : target_option_default_node);
31269 cl_target_option_restore (&global_options,
31270 TREE_TARGET_OPTION (cur_tree));
31272 else
31274 rs6000_cpu_index = rs6000_tune_index = -1;
31275 if (!rs6000_inner_target_options (args, false)
31276 || !rs6000_option_override_internal (false)
31277 || (cur_tree = build_target_option_node (&global_options))
31278 == NULL_TREE)
31280 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
31281 fprintf (stderr, "invalid pragma\n");
31283 return false;
31287 target_option_current_node = cur_tree;
31288 rs6000_activate_target_options (target_option_current_node);
31290 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
31291 change the macros that are defined. */
31292 if (rs6000_target_modify_macros_ptr)
31294 prev_opt = TREE_TARGET_OPTION (prev_tree);
31295 prev_bumask = prev_opt->x_rs6000_builtin_mask;
31296 prev_flags = prev_opt->x_rs6000_isa_flags;
31298 cur_opt = TREE_TARGET_OPTION (cur_tree);
31299 cur_flags = cur_opt->x_rs6000_isa_flags;
31300 cur_bumask = cur_opt->x_rs6000_builtin_mask;
31302 diff_bumask = (prev_bumask ^ cur_bumask);
31303 diff_flags = (prev_flags ^ cur_flags);
31305 if ((diff_flags != 0) || (diff_bumask != 0))
31307 /* Delete old macros. */
31308 rs6000_target_modify_macros_ptr (false,
31309 prev_flags & diff_flags,
31310 prev_bumask & diff_bumask);
31312 /* Define new macros. */
31313 rs6000_target_modify_macros_ptr (true,
31314 cur_flags & diff_flags,
31315 cur_bumask & diff_bumask);
31319 return true;
31323 /* Remember the last target of rs6000_set_current_function. */
31324 static GTY(()) tree rs6000_previous_fndecl;
31326 /* Restore target's globals from NEW_TREE and invalidate the
31327 rs6000_previous_fndecl cache. */
31329 void
31330 rs6000_activate_target_options (tree new_tree)
31332 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
31333 if (TREE_TARGET_GLOBALS (new_tree))
31334 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31335 else if (new_tree == target_option_default_node)
31336 restore_target_globals (&default_target_globals);
31337 else
31338 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
31339 rs6000_previous_fndecl = NULL_TREE;
31342 /* Establish appropriate back-end context for processing the function
31343 FNDECL. The argument might be NULL to indicate processing at top
31344 level, outside of any function scope. */
31345 static void
31346 rs6000_set_current_function (tree fndecl)
31348 if (TARGET_DEBUG_TARGET)
31350 fprintf (stderr, "\n==================== rs6000_set_current_function");
31352 if (fndecl)
31353 fprintf (stderr, ", fndecl %s (%p)",
31354 (DECL_NAME (fndecl)
31355 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
31356 : "<unknown>"), (void *)fndecl);
31358 if (rs6000_previous_fndecl)
31359 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
31361 fprintf (stderr, "\n");
31364 /* Only change the context if the function changes. This hook is called
31365 several times in the course of compiling a function, and we don't want to
31366 slow things down too much or call target_reinit when it isn't safe. */
31367 if (fndecl == rs6000_previous_fndecl)
31368 return;
31370 tree old_tree;
31371 if (rs6000_previous_fndecl == NULL_TREE)
31372 old_tree = target_option_current_node;
31373 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
31374 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
31375 else
31376 old_tree = target_option_default_node;
31378 tree new_tree;
31379 if (fndecl == NULL_TREE)
31381 if (old_tree != target_option_current_node)
31382 new_tree = target_option_current_node;
31383 else
31384 new_tree = NULL_TREE;
31386 else
31388 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
31389 if (new_tree == NULL_TREE)
31390 new_tree = target_option_default_node;
31393 if (TARGET_DEBUG_TARGET)
31395 if (new_tree)
31397 fprintf (stderr, "\nnew fndecl target specific options:\n");
31398 debug_tree (new_tree);
31401 if (old_tree)
31403 fprintf (stderr, "\nold fndecl target specific options:\n");
31404 debug_tree (old_tree);
31407 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
31408 fprintf (stderr, "--------------------\n");
31411 if (new_tree && old_tree != new_tree)
31412 rs6000_activate_target_options (new_tree);
31414 if (fndecl)
31415 rs6000_previous_fndecl = fndecl;
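/* For instance (an illustrative sketch, not from the sources), compiling

     __attribute__ ((target ("cpu=power9"))) void f (void) { }
     void g (void) { }

   makes this hook switch the backend to the power9 option set while f is
   being compiled, and back to the command-line defaults for g.  */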
31419 /* Save the current options */
31421 static void
31422 rs6000_function_specific_save (struct cl_target_option *ptr,
31423 struct gcc_options *opts)
31425 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
31426 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
31429 /* Restore the current options */
31431 static void
31432 rs6000_function_specific_restore (struct gcc_options *opts,
31433 struct cl_target_option *ptr)
31436 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
31437 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
31438 (void) rs6000_option_override_internal (false);
31441 /* Print the current options */
31443 static void
31444 rs6000_function_specific_print (FILE *file, int indent,
31445 struct cl_target_option *ptr)
31447 rs6000_print_isa_options (file, indent, "Isa options set",
31448 ptr->x_rs6000_isa_flags);
31450 rs6000_print_isa_options (file, indent, "Isa options explicit",
31451 ptr->x_rs6000_isa_flags_explicit);
31454 /* Helper function to print the current isa or misc options on a line. */
31456 static void
31457 rs6000_print_options_internal (FILE *file,
31458 int indent,
31459 const char *string,
31460 HOST_WIDE_INT flags,
31461 const char *prefix,
31462 const struct rs6000_opt_mask *opts,
31463 size_t num_elements)
31465 size_t i;
31466 size_t start_column = 0;
31467 size_t cur_column;
31468 size_t max_column = 120;
31469 size_t prefix_len = strlen (prefix);
31470 size_t comma_len = 0;
31471 const char *comma = "";
31473 if (indent)
31474 start_column += fprintf (file, "%*s", indent, "");
31476 if (!flags)
31478 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
31479 return;
31482 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
31484 /* Print the various mask options. */
31485 cur_column = start_column;
31486 for (i = 0; i < num_elements; i++)
31488 bool invert = opts[i].invert;
31489 const char *name = opts[i].name;
31490 const char *no_str = "";
31491 HOST_WIDE_INT mask = opts[i].mask;
31492 size_t len = comma_len + prefix_len + strlen (name);
31494 if (!invert)
31496 if ((flags & mask) == 0)
31498 no_str = "no-";
31499 len += sizeof ("no-") - 1;
31502 flags &= ~mask;
31505 else
31507 if ((flags & mask) != 0)
31509 no_str = "no-";
31510 len += sizeof ("no-") - 1;
31513 flags |= mask;
31516 cur_column += len;
31517 if (cur_column > max_column)
31519 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
31520 cur_column = start_column + len;
31521 comma = "";
31524 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
31525 comma = ", ";
31526 comma_len = sizeof (", ") - 1;
31529 fputs ("\n", file);
31532 /* Helper function to print the current isa options on a line. */
31534 static void
31535 rs6000_print_isa_options (FILE *file, int indent, const char *string,
31536 HOST_WIDE_INT flags)
31538 rs6000_print_options_internal (file, indent, string, flags, "-m",
31539 &rs6000_opt_masks[0],
31540 ARRAY_SIZE (rs6000_opt_masks));
31543 static void
31544 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
31545 HOST_WIDE_INT flags)
31547 rs6000_print_options_internal (file, indent, string, flags, "",
31548 &rs6000_builtin_mask_names[0],
31549 ARRAY_SIZE (rs6000_builtin_mask_names));
31552 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
31553 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
31554 -mupper-regs-df, etc.).
31556 If the user used -mno-power8-vector, we need to turn off all of the implicit
31557 ISA 2.07 and 3.0 options that relate to the vector unit.
31559 If the user used -mno-power9-vector, we need to turn off all of the implicit
31560 ISA 3.0 options that relate to the vector unit.
31562 This function does not handle explicit options such as the user specifying
31563 -mdirect-move. These are handled in rs6000_option_override_internal, and
31564 the appropriate error is given if needed.
31566 We return a mask of all of the implicit options that should not be enabled
31567 by default. */
31569 static HOST_WIDE_INT
31570 rs6000_disable_incompatible_switches (void)
31572 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
31573 size_t i, j;
31575 static const struct {
31576 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
31577 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
31578 const char *const name; /* name of the switch. */
31579 } flags[] = {
31580 { OPTION_MASK_FUTURE, OTHER_FUTURE_MASKS, "future" },
31581 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
31582 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
31583 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
31586 for (i = 0; i < ARRAY_SIZE (flags); i++)
31588 HOST_WIDE_INT no_flag = flags[i].no_flag;
31590 if ((rs6000_isa_flags & no_flag) == 0
31591 && (rs6000_isa_flags_explicit & no_flag) != 0)
31593 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
31594 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
31595 & rs6000_isa_flags
31596 & dep_flags);
31598 if (set_flags)
31600 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
31601 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
31603 set_flags &= ~rs6000_opt_masks[j].mask;
31604 error ("%<-mno-%s%> turns off %<-m%s%>",
31605 flags[i].name,
31606 rs6000_opt_masks[j].name);
31609 gcc_assert (!set_flags);
31612 rs6000_isa_flags &= ~dep_flags;
31613 ignore_masks |= no_flag | dep_flags;
31617 return ignore_masks;
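/* As an illustrative example, a command line such as

     gcc -mcpu=power9 -mno-vsx file.c

   reaches this point with VSX explicitly disabled, so the implicit vector
   options that depend on it (e.g. -mpower8-vector and -mpower9-vector)
   are cleared here rather than left to conflict with -mno-vsx.  */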
31621 /* Helper function for printing the function name when debugging. */
31623 static const char *
31624 get_decl_name (tree fn)
31626 tree name;
31628 if (!fn)
31629 return "<null>";
31631 name = DECL_NAME (fn);
31632 if (!name)
31633 return "<no-name>";
31635 return IDENTIFIER_POINTER (name);
31638 /* Return the clone id of the target we are compiling code for in a target
31639 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
31640 the priority list for the target clones (ordered from lowest to
31641 highest). */
31643 static int
31644 rs6000_clone_priority (tree fndecl)
31646 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
31647 HOST_WIDE_INT isa_masks;
31648 int ret = CLONE_DEFAULT;
31649 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
31650 const char *attrs_str = NULL;
31652 attrs = TREE_VALUE (TREE_VALUE (attrs));
31653 attrs_str = TREE_STRING_POINTER (attrs);
31655 /* Return priority zero for the default function. Return the ISA needed for the
31656 function if it is not the default. */
31657 if (strcmp (attrs_str, "default") != 0)
31659 if (fn_opts == NULL_TREE)
31660 fn_opts = target_option_default_node;
31662 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
31663 isa_masks = rs6000_isa_flags;
31664 else
31665 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
31667 for (ret = CLONE_MAX - 1; ret != 0; ret--)
31668 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
31669 break;
31672 if (TARGET_DEBUG_TARGET)
31673 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
31674 get_decl_name (fndecl), ret);
31676 return ret;
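/* A user-level example (illustrative) of the clones this priority serves:

     __attribute__ ((target_clones ("cpu=power9,default")))
     long mod_func (long a, long b) { return (a % b) + 1; }

   The "default" version gets priority CLONE_DEFAULT (0), while the power9
   version maps to a higher clone id and is preferred at run time when the
   CPU supports it.  */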
31679 /* This compares the priority of target features in function DECL1 and DECL2.
31680 It returns a positive value if DECL1 is higher priority, a negative value if
31681 DECL2 is higher priority, and 0 if they are the same. Note, priorities are
31682 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
31684 static int
31685 rs6000_compare_version_priority (tree decl1, tree decl2)
31687 int priority1 = rs6000_clone_priority (decl1);
31688 int priority2 = rs6000_clone_priority (decl2);
31689 int ret = priority1 - priority2;
31691 if (TARGET_DEBUG_TARGET)
31692 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
31693 get_decl_name (decl1), get_decl_name (decl2), ret);
31695 return ret;
31698 /* Make a dispatcher declaration for the multi-versioned function DECL.
31699 Calls to DECL will be replaced with calls to the dispatcher
31700 by the front-end. Returns the decl of the dispatcher function. */
31702 static tree
31703 rs6000_get_function_versions_dispatcher (void *decl)
31705 tree fn = (tree) decl;
31706 struct cgraph_node *node = NULL;
31707 struct cgraph_node *default_node = NULL;
31708 struct cgraph_function_version_info *node_v = NULL;
31709 struct cgraph_function_version_info *first_v = NULL;
31711 tree dispatch_decl = NULL;
31713 struct cgraph_function_version_info *default_version_info = NULL;
31714 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
31716 if (TARGET_DEBUG_TARGET)
31717 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
31718 get_decl_name (fn));
31720 node = cgraph_node::get (fn);
31721 gcc_assert (node != NULL);
31723 node_v = node->function_version ();
31724 gcc_assert (node_v != NULL);
31726 if (node_v->dispatcher_resolver != NULL)
31727 return node_v->dispatcher_resolver;
31729 /* Find the default version and make it the first node. */
31730 first_v = node_v;
31731 /* Go to the beginning of the chain. */
31732 while (first_v->prev != NULL)
31733 first_v = first_v->prev;
31735 default_version_info = first_v;
31736 while (default_version_info != NULL)
31738 const tree decl2 = default_version_info->this_node->decl;
31739 if (is_function_default_version (decl2))
31740 break;
31741 default_version_info = default_version_info->next;
31744 /* If there is no default node, just return NULL. */
31745 if (default_version_info == NULL)
31746 return NULL;
31748 /* Make default info the first node. */
31749 if (first_v != default_version_info)
31751 default_version_info->prev->next = default_version_info->next;
31752 if (default_version_info->next)
31753 default_version_info->next->prev = default_version_info->prev;
31754 first_v->prev = default_version_info;
31755 default_version_info->next = first_v;
31756 default_version_info->prev = NULL;
31759 default_node = default_version_info->this_node;
31761 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
31762 error_at (DECL_SOURCE_LOCATION (default_node->decl),
31763 "%<target_clones%> attribute needs GLIBC (2.23 and newer) that "
31764 "exports hardware capability bits");
31765 #else
31767 if (targetm.has_ifunc_p ())
31769 struct cgraph_function_version_info *it_v = NULL;
31770 struct cgraph_node *dispatcher_node = NULL;
31771 struct cgraph_function_version_info *dispatcher_version_info = NULL;
31773 /* Right now, the dispatching is done via ifunc. */
31774 dispatch_decl = make_dispatcher_decl (default_node->decl);
31776 dispatcher_node = cgraph_node::get_create (dispatch_decl);
31777 gcc_assert (dispatcher_node != NULL);
31778 dispatcher_node->dispatcher_function = 1;
31779 dispatcher_version_info
31780 = dispatcher_node->insert_new_function_version ();
31781 dispatcher_version_info->next = default_version_info;
31782 dispatcher_node->definition = 1;
31784 /* Set the dispatcher for all the versions. */
31785 it_v = default_version_info;
31786 while (it_v != NULL)
31788 it_v->dispatcher_resolver = dispatch_decl;
31789 it_v = it_v->next;
31792 else
31794 error_at (DECL_SOURCE_LOCATION (default_node->decl),
31795 "multiversioning needs ifunc which is not supported "
31796 "on this target");
31798 #endif
31800 return dispatch_decl;
31803 /* Make the resolver function decl to dispatch the versions of a multi-
31804 versioned function, DEFAULT_DECL. Create an empty basic block in the
31805 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
31806 function. */
31808 static tree
31809 make_resolver_func (const tree default_decl,
31810 const tree dispatch_decl,
31811 basic_block *empty_bb)
31813 /* Make the resolver function static. The resolver function returns
31814 void *. */
31815 tree decl_name = clone_function_name (default_decl, "resolver");
31816 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
31817 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
31818 tree decl = build_fn_decl (resolver_name, type);
31819 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
31821 DECL_NAME (decl) = decl_name;
31822 TREE_USED (decl) = 1;
31823 DECL_ARTIFICIAL (decl) = 1;
31824 DECL_IGNORED_P (decl) = 0;
31825 TREE_PUBLIC (decl) = 0;
31826 DECL_UNINLINABLE (decl) = 1;
31828 /* Resolver is not external, body is generated. */
31829 DECL_EXTERNAL (decl) = 0;
31830 DECL_EXTERNAL (dispatch_decl) = 0;
31832 DECL_CONTEXT (decl) = NULL_TREE;
31833 DECL_INITIAL (decl) = make_node (BLOCK);
31834 DECL_STATIC_CONSTRUCTOR (decl) = 0;
31836 /* Build result decl and add to function_decl. */
31837 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
31838 DECL_CONTEXT (t) = decl;
31839 DECL_ARTIFICIAL (t) = 1;
31840 DECL_IGNORED_P (t) = 1;
31841 DECL_RESULT (decl) = t;
31843 gimplify_function_tree (decl);
31844 push_cfun (DECL_STRUCT_FUNCTION (decl));
31845 *empty_bb = init_lowered_empty_function (decl, false,
31846 profile_count::uninitialized ());
31848 cgraph_node::add_new_function (decl, true);
31849 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
31851 pop_cfun ();
31853 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
31854 DECL_ATTRIBUTES (dispatch_decl)
31855 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
31857 cgraph_node::create_same_body_alias (dispatch_decl, decl);
31859 return decl;
31862 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
31863 return a pointer to VERSION_DECL if we are running on a machine that
31864 supports the hardware architecture bits indexed by CLONE_ISA. This function will
31865 be called during version dispatch to decide which function version to
31866 execute. It returns the basic block at the end, to which more conditions
31867 can be added. */
31869 static basic_block
31870 add_condition_to_bb (tree function_decl, tree version_decl,
31871 int clone_isa, basic_block new_bb)
31873 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
31875 gcc_assert (new_bb != NULL);
31876 gimple_seq gseq = bb_seq (new_bb);
31879 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
31880 build_fold_addr_expr (version_decl));
31881 tree result_var = create_tmp_var (ptr_type_node);
31882 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
31883 gimple *return_stmt = gimple_build_return (result_var);
31885 if (clone_isa == CLONE_DEFAULT)
31887 gimple_seq_add_stmt (&gseq, convert_stmt);
31888 gimple_seq_add_stmt (&gseq, return_stmt);
31889 set_bb_seq (new_bb, gseq);
31890 gimple_set_bb (convert_stmt, new_bb);
31891 gimple_set_bb (return_stmt, new_bb);
31892 pop_cfun ();
31893 return new_bb;
31896 tree bool_zero = build_int_cst (bool_int_type_node, 0);
31897 tree cond_var = create_tmp_var (bool_int_type_node);
31898 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
31899 const char *arg_str = rs6000_clone_map[clone_isa].name;
31900 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
31901 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
31902 gimple_call_set_lhs (call_cond_stmt, cond_var);
31904 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
31905 gimple_set_bb (call_cond_stmt, new_bb);
31906 gimple_seq_add_stmt (&gseq, call_cond_stmt);
31908 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
31909 NULL_TREE, NULL_TREE);
31910 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
31911 gimple_set_bb (if_else_stmt, new_bb);
31912 gimple_seq_add_stmt (&gseq, if_else_stmt);
31914 gimple_seq_add_stmt (&gseq, convert_stmt);
31915 gimple_seq_add_stmt (&gseq, return_stmt);
31916 set_bb_seq (new_bb, gseq);
31918 basic_block bb1 = new_bb;
31919 edge e12 = split_block (bb1, if_else_stmt);
31920 basic_block bb2 = e12->dest;
31921 e12->flags &= ~EDGE_FALLTHRU;
31922 e12->flags |= EDGE_TRUE_VALUE;
31924 edge e23 = split_block (bb2, return_stmt);
31925 gimple_set_bb (convert_stmt, bb2);
31926 gimple_set_bb (return_stmt, bb2);
31928 basic_block bb3 = e23->dest;
31929 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
31931 remove_edge (e23);
31932 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
31934 pop_cfun ();
31935 return bb3;
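/* Conceptually, the chain of blocks built above behaves like the following
   hand-written resolver (a sketch only; foo_power9, foo_power8 and
   foo_default are hypothetical clone names):

     static void *foo_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
	 return (void *) foo_power9;
       if (__builtin_cpu_supports ("arch_2_07"))
	 return (void *) foo_power8;
       return (void *) foo_default;
     }

   Each __builtin_cpu_supports test corresponds to one
   RS6000_BUILTIN_CPU_SUPPORTS condition added by add_condition_to_bb.  */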
31938 /* This function generates the dispatch function for multi-versioned functions.
31939 DISPATCH_DECL is the function which will contain the dispatch logic.
31940 FNDECLS is the set of function choices for dispatch, passed as a vector.
31941 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
31942 code is generated. */
31944 static int
31945 dispatch_function_versions (tree dispatch_decl,
31946 void *fndecls_p,
31947 basic_block *empty_bb)
31949 int ix;
31950 tree ele;
31951 vec<tree> *fndecls;
31952 tree clones[CLONE_MAX];
31954 if (TARGET_DEBUG_TARGET)
31955 fputs ("dispatch_function_versions, top\n", stderr);
31957 gcc_assert (dispatch_decl != NULL
31958 && fndecls_p != NULL
31959 && empty_bb != NULL);
31961 /* fndecls_p is actually a vector. */
31962 fndecls = static_cast<vec<tree> *> (fndecls_p);
31964 /* At least one more version other than the default. */
31965 gcc_assert (fndecls->length () >= 2);
31967 /* The first version in the vector is the default decl. */
31968 memset ((void *) clones, '\0', sizeof (clones));
31969 clones[CLONE_DEFAULT] = (*fndecls)[0];
31971 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
31972 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
31973 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
31974 recent glibc. If we ever need to call __builtin_cpu_init, we would need
31975 to insert the code here to do the call. */
31977 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
31979 int priority = rs6000_clone_priority (ele);
31980 if (!clones[priority])
31981 clones[priority] = ele;
31984 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
31985 if (clones[ix])
31987 if (TARGET_DEBUG_TARGET)
31988 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
31989 ix, get_decl_name (clones[ix]));
31991 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
31992 *empty_bb);
31995 return 0;
31998 /* Generate the dispatching code body to dispatch multi-versioned function
31999 DECL. The target hook is called to process the "target" attributes and
32000 provide the code to dispatch the right function at run-time. NODE points
32001 to the dispatcher decl whose body will be created. */
32003 static tree
32004 rs6000_generate_version_dispatcher_body (void *node_p)
32006 tree resolver;
32007 basic_block empty_bb;
32008 struct cgraph_node *node = (cgraph_node *) node_p;
32009 struct cgraph_function_version_info *ninfo = node->function_version ();
32011 if (ninfo->dispatcher_resolver)
32012 return ninfo->dispatcher_resolver;
32014 /* node is going to be an alias, so remove the finalized bit. */
32015 node->definition = false;
32017 /* The first version in the chain corresponds to the default version. */
32018 ninfo->dispatcher_resolver = resolver
32019 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
32021 if (TARGET_DEBUG_TARGET)
32022 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
32023 get_decl_name (resolver));
32025 push_cfun (DECL_STRUCT_FUNCTION (resolver));
32026 auto_vec<tree, 2> fn_ver_vec;
32028 for (struct cgraph_function_version_info *vinfo = ninfo->next;
32029 vinfo;
32030 vinfo = vinfo->next)
32032 struct cgraph_node *version = vinfo->this_node;
32033 /* Check for virtual functions here again, as by this time it should
32034 have been determined if this function needs a vtable index or
32035 not. This happens for methods in derived classes that override
32036 virtual methods in base classes but are not explicitly marked as
32037 virtual. */
32038 if (DECL_VINDEX (version->decl))
32039 sorry ("Virtual function multiversioning not supported");
32041 fn_ver_vec.safe_push (version->decl);
32044 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
32045 cgraph_edge::rebuild_edges ();
32046 pop_cfun ();
32047 return resolver;
32051 /* Hook to determine if one function can safely inline another. */
32053 static bool
32054 rs6000_can_inline_p (tree caller, tree callee)
32056 bool ret = false;
32057 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32058 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32060 /* If callee has no option attributes, then it is ok to inline. */
32061 if (!callee_tree)
32062 ret = true;
32064 /* If caller has no option attributes but callee does, then it is not ok to
32065 inline. */
32066 else if (!caller_tree)
32067 ret = false;
32069 else
32071 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32072 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32074 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32075 can inline an altivec function but a non-vsx function can't inline a
32076 vsx function. */
32077 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32078 == callee_opts->x_rs6000_isa_flags)
32079 ret = true;
32082 if (TARGET_DEBUG_TARGET)
32083 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
32084 get_decl_name (caller), get_decl_name (callee),
32085 (ret ? "can" : "cannot"));
32087 return ret;
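/* Example of the subset rule (illustrative): a caller compiled with
   target ("cpu=power8") has both VSX and Altivec enabled, so it may inline
   a callee marked target ("altivec"); the reverse is rejected because the
   Altivec-only caller lacks the callee's VSX flag.  */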
32090 /* Allocate a stack temp and fixup the address so it meets the particular
32091 memory requirements (either offsettable or REG+REG addressing). */
32094 rs6000_allocate_stack_temp (machine_mode mode,
32095 bool offsettable_p,
32096 bool reg_reg_p)
32098 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32099 rtx addr = XEXP (stack, 0);
32100 int strict_p = reload_completed;
32102 if (!legitimate_indirect_address_p (addr, strict_p))
32104 if (offsettable_p
32105 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32106 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32108 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32109 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32112 return stack;
32115 /* Given a memory reference, if it is not a reg or reg+reg addressing,
32116 convert to such a form to deal with memory reference instructions
32117 like STFIWX and LDBRX that only take reg+reg addressing. */
32120 rs6000_force_indexed_or_indirect_mem (rtx x)
32122 machine_mode mode = GET_MODE (x);
32124 gcc_assert (MEM_P (x));
32125 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
32127 rtx addr = XEXP (x, 0);
32128 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32130 rtx reg = XEXP (addr, 0);
32131 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32132 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32133 gcc_assert (REG_P (reg));
32134 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32135 addr = reg;
32137 else if (GET_CODE (addr) == PRE_MODIFY)
32139 rtx reg = XEXP (addr, 0);
32140 rtx expr = XEXP (addr, 1);
32141 gcc_assert (REG_P (reg));
32142 gcc_assert (GET_CODE (expr) == PLUS);
32143 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32144 addr = reg;
32147 if (GET_CODE (addr) == PLUS)
32149 rtx op0 = XEXP (addr, 0);
32150 rtx op1 = XEXP (addr, 1);
32151 op0 = force_reg (Pmode, op0);
32152 op1 = force_reg (Pmode, op1);
32153 x = replace_equiv_address (x, gen_rtx_PLUS (Pmode, op0, op1));
32155 else
32156 x = replace_equiv_address (x, force_reg (Pmode, addr));
32159 return x;
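/* For example (a sketch), an operand such as

     (mem:DI (plus:DI (reg:DI 3) (const_int 8)))

   is not indexed_or_indirect, so the constant 8 is forced into a pseudo
   and the address is rewritten as (plus (reg 3) (reg tmp)), the reg+reg
   form that LDBRX requires.  */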
32162 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32164 On the RS/6000, all integer constants are acceptable; most won't be valid
32165 for particular insns, though. Only easy FP constants are acceptable. */
32167 static bool
32168 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
32170 if (TARGET_ELF && tls_referenced_p (x))
32171 return false;
32173 if (CONST_DOUBLE_P (x))
32174 return easy_fp_constant (x, mode);
32176 if (GET_CODE (x) == CONST_VECTOR)
32177 return easy_vector_constant (x, mode);
32179 return true;
32183 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
32185 static bool
32186 chain_already_loaded (rtx_insn *last)
32188 for (; last != NULL; last = PREV_INSN (last))
32190 if (NONJUMP_INSN_P (last))
32192 rtx patt = PATTERN (last);
32194 if (GET_CODE (patt) == SET)
32196 rtx lhs = XEXP (patt, 0);
32198 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
32199 return true;
32203 return false;
32206 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32208 void
32209 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32211 rtx func = func_desc;
32212 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32213 rtx toc_load = NULL_RTX;
32214 rtx toc_restore = NULL_RTX;
32215 rtx func_addr;
32216 rtx abi_reg = NULL_RTX;
32217 rtx call[4];
32218 int n_call;
32219 rtx insn;
32220 bool is_pltseq_longcall;
32222 if (global_tlsarg)
32223 tlsarg = global_tlsarg;
32225 /* Handle longcall attributes. */
32226 is_pltseq_longcall = false;
32227 if ((INTVAL (cookie) & CALL_LONG) != 0
32228 && GET_CODE (func_desc) == SYMBOL_REF)
32230 func = rs6000_longcall_ref (func_desc, tlsarg);
32231 if (TARGET_PLTSEQ)
32232 is_pltseq_longcall = true;
32235 /* Handle indirect calls. */
32236 if (!SYMBOL_REF_P (func)
32237 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
32239 if (!rs6000_pcrel_p (cfun))
32241 /* Save the TOC into its reserved slot before the call,
32242 and prepare to restore it after the call. */
32243 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32244 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
32245 gen_rtvec (1, stack_toc_offset),
32246 UNSPEC_TOCSLOT);
32247 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
32249 /* Can we optimize saving the TOC in the prologue or
32250 do we need to do it at every call? */
32251 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32252 cfun->machine->save_toc_in_prologue = true;
32253 else
32255 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32256 rtx stack_toc_mem = gen_frame_mem (Pmode,
32257 gen_rtx_PLUS (Pmode, stack_ptr,
32258 stack_toc_offset));
32259 MEM_VOLATILE_P (stack_toc_mem) = 1;
32260 if (is_pltseq_longcall)
32262 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
32263 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32264 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
32266 else
32267 emit_move_insn (stack_toc_mem, toc_reg);
32271 if (DEFAULT_ABI == ABI_ELFv2)
32273 /* A function pointer in the ELFv2 ABI is just a plain address, but
32274 the ABI requires it to be loaded into r12 before the call. */
32275 func_addr = gen_rtx_REG (Pmode, 12);
32276 if (!rtx_equal_p (func_addr, func))
32277 emit_move_insn (func_addr, func);
32278 abi_reg = func_addr;
32279 /* Indirect calls via CTR are strongly preferred over indirect
32280 calls via LR, so move the address there. Needed to mark
32281 this insn for linker plt sequence editing too. */
32282 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32283 if (is_pltseq_longcall)
32285 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
32286 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32287 emit_insn (gen_rtx_SET (func_addr, mark_func));
32288 v = gen_rtvec (2, func_addr, func_desc);
32289 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32291 else
32292 emit_move_insn (func_addr, abi_reg);
32294 else
32296 /* A function pointer under AIX is a pointer to a data area whose
32297 first word contains the actual address of the function, whose
32298 second word contains a pointer to its TOC, and whose third word
32299 contains a value to place in the static chain register (r11).
32300 Note that if we load the static chain, our "trampoline" need
32301 not have any executable code. */
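/* Viewed as a C type, the descriptor is laid out roughly as follows
   (a sketch; the ABI defines the layout in words, not as a C struct):

     struct aix_func_desc { void *code_addr; void *toc; void *static_chain; };  */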
32303 /* Load up address of the actual function. */
32304 func = force_reg (Pmode, func);
32305 func_addr = gen_reg_rtx (Pmode);
32306 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
32308 /* Indirect calls via CTR are strongly preferred over indirect
32309 calls via LR, so move the address there. */
32310 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
32311 emit_move_insn (ctr_reg, func_addr);
32312 func_addr = ctr_reg;
32314 /* Prepare to load the TOC of the called function. Note that the
32315 TOC load must happen immediately before the actual call so
32316 that unwinding the TOC registers works correctly. See the
32317 comment in frob_update_context. */
32318 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32319 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32320 gen_rtx_PLUS (Pmode, func,
32321 func_toc_offset));
32322 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32324 /* If we have a static chain, load it up. But, if the call was
32325 originally direct, the 3rd word has not been written since no
32326 trampoline has been built, so we ought not to load it, lest we
32327 override a static chain value. */
32328 if (!(GET_CODE (func_desc) == SYMBOL_REF
32329 && SYMBOL_REF_FUNCTION_P (func_desc))
32330 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
32331 && !chain_already_loaded (get_current_sequence ()->next->last))
32333 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32334 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32335 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32336 gen_rtx_PLUS (Pmode, func,
32337 func_sc_offset));
32338 emit_move_insn (sc_reg, func_sc_mem);
32339 abi_reg = sc_reg;
32343 else
32345 /* No TOC register needed for calls from PC-relative callers. */
32346 if (!rs6000_pcrel_p (cfun))
32347 /* Direct calls use the TOC: for local calls, the callee will
32348 assume the TOC register is set; for non-local calls, the
32349 PLT stub needs the TOC register. */
32350 abi_reg = toc_reg;
32351 func_addr = func;
32354 /* Create the call. */
32355 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32356 if (value != NULL_RTX)
32357 call[0] = gen_rtx_SET (value, call[0]);
32358 n_call = 1;
32360 if (toc_load)
32361 call[n_call++] = toc_load;
32362 if (toc_restore)
32363 call[n_call++] = toc_restore;
32365 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32367 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32368 insn = emit_call_insn (insn);
32370 /* Mention all registers defined by the ABI to hold information
32371 as uses in CALL_INSN_FUNCTION_USAGE. */
32372 if (abi_reg)
32373 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32376 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32378 void
32379 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32381 rtx call[2];
32382 rtx insn;
32384 gcc_assert (INTVAL (cookie) == 0);
32386 if (global_tlsarg)
32387 tlsarg = global_tlsarg;
32389 /* Create the call. */
32390 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
32391 if (value != NULL_RTX)
32392 call[0] = gen_rtx_SET (value, call[0]);
32394 call[1] = simple_return_rtx;
32396 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32397 insn = emit_call_insn (insn);
32399 /* Note use of the TOC register. */
32400 if (!rs6000_pcrel_p (cfun))
32401 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
32402 gen_rtx_REG (Pmode, TOC_REGNUM));
32405 /* Expand code to perform a call under the SYSV4 ABI. */
32407 void
32408 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32410 rtx func = func_desc;
32411 rtx func_addr;
32412 rtx call[4];
32413 rtx insn;
32414 rtx abi_reg = NULL_RTX;
32415 int n;
32417 if (global_tlsarg)
32418 tlsarg = global_tlsarg;
32420 /* Handle longcall attributes. */
32421 if ((INTVAL (cookie) & CALL_LONG) != 0
32422 && GET_CODE (func_desc) == SYMBOL_REF)
32424 func = rs6000_longcall_ref (func_desc, tlsarg);
32425 /* If the longcall was implemented as an inline PLT call using
32426 PLT unspecs then func will be REG:r11. If not, func will be
32427 a pseudo reg. The inline PLT call sequence supports lazy
32428 linking (and longcalls to functions in dlopen'd libraries).
32429 The other style of longcalls doesn't. The lazy linking entry
32430 to the dynamic symbol resolver requires r11 be the function
32431 address (as it is for linker generated PLT stubs). Ensure
32432 r11 stays valid to the bctrl by marking r11 used by the call. */
32433 if (TARGET_PLTSEQ)
32434 abi_reg = func;
32437 /* Handle indirect calls. */
32438 if (GET_CODE (func) != SYMBOL_REF)
32440 func = force_reg (Pmode, func);
32442 /* Indirect calls via CTR are strongly preferred over indirect
32443 calls via LR, so move the address there. That can't be left
32444 to reload because we want to mark every instruction in an
32445 inline PLT call sequence with a reloc, enabling the linker to
32446 edit the sequence back to a direct call when that makes sense. */
32447 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32448 if (abi_reg)
32450 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
32451 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32452 emit_insn (gen_rtx_SET (func_addr, mark_func));
32453 v = gen_rtvec (2, func_addr, func_desc);
32454 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32456 else
32457 emit_move_insn (func_addr, func);
32459 else
32460 func_addr = func;
32462 /* Create the call. */
32463 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32464 if (value != NULL_RTX)
32465 call[0] = gen_rtx_SET (value, call[0]);
32467 call[1] = gen_rtx_USE (VOIDmode, cookie);
32468 n = 2;
32469 if (TARGET_SECURE_PLT
32470 && flag_pic
32471 && GET_CODE (func_addr) == SYMBOL_REF
32472 && !SYMBOL_REF_LOCAL_P (func_addr))
32473 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
32475 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32477 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
32478 insn = emit_call_insn (insn);
32479 if (abi_reg)
32480 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32483 /* Expand code to perform a sibling call under the SysV4 ABI. */
32485 void
32486 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32488 rtx func = func_desc;
32489 rtx func_addr;
32490 rtx call[3];
32491 rtx insn;
32492 rtx abi_reg = NULL_RTX;
32494 if (global_tlsarg)
32495 tlsarg = global_tlsarg;
32497 /* Handle longcall attributes. */
32498 if ((INTVAL (cookie) & CALL_LONG) != 0
32499 && GET_CODE (func_desc) == SYMBOL_REF)
32501 func = rs6000_longcall_ref (func_desc, tlsarg);
32502 /* If the longcall was implemented as an inline PLT call using
32503 PLT unspecs then func will be REG:r11. If not, func will be
32504 a pseudo reg. The inline PLT call sequence supports lazy
32505 linking (and longcalls to functions in dlopen'd libraries).
32506 The other style of longcalls doesn't. The lazy linking entry
32507 to the dynamic symbol resolver requires r11 be the function
32508 address (as it is for linker generated PLT stubs). Ensure
32509 r11 stays valid to the bctr by marking r11 used by the call. */
32510 if (TARGET_PLTSEQ)
32511 abi_reg = func;
32514 /* Handle indirect calls. */
32515 if (GET_CODE (func) != SYMBOL_REF)
32517 func = force_reg (Pmode, func);
32519 /* Indirect sibcalls must go via CTR. That can't be left to
32520 reload because we want to mark every instruction in an inline
32521 PLT call sequence with a reloc, enabling the linker to edit
32522 the sequence back to a direct call when that makes sense. */
32523 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32524 if (abi_reg)
32526 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
32527 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32528 emit_insn (gen_rtx_SET (func_addr, mark_func));
32529 v = gen_rtvec (2, func_addr, func_desc);
32530 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32532 else
32533 emit_move_insn (func_addr, func);
32535 else
32536 func_addr = func;
32538 /* Create the call. */
32539 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32540 if (value != NULL_RTX)
32541 call[0] = gen_rtx_SET (value, call[0]);
32543 call[1] = gen_rtx_USE (VOIDmode, cookie);
32544 call[2] = simple_return_rtx;
32546 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
32547 insn = emit_call_insn (insn);
32548 if (abi_reg)
32549 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32552 #if TARGET_MACHO
32554 /* Expand code to perform a call under the Darwin ABI.
32555 Modulo handling of mlongcall, this is much the same as sysv.
32556 If/when the longcall optimisation is removed, we could drop this
32557 code and use the sysv case (taking care to avoid the tls stuff).
32559 We can use this for sibcalls too, if needed. */
32561 void
32562 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
32563 rtx cookie, bool sibcall)
32565 rtx func = func_desc;
32566 rtx func_addr;
32567 rtx call[3];
32568 rtx insn;
32569 int cookie_val = INTVAL (cookie);
32570 bool make_island = false;
32572 /* Handle longcall attributes; there are two cases for Darwin:
32573 1) Newer linkers are capable of synthesising any branch islands needed.
32574 2) We need a helper branch island synthesised by the compiler.
32575 The second case has mostly been retired and we don't use it for m64.
32576 In fact, it is only an optimisation; we could just indirect as sysv does,
32577 but we keep it for backwards compatibility for now.
32578 If we're going to use this, then we need to keep the CALL_LONG bit set,
32579 so that we can pick up the special insn form later. */
32580 if ((cookie_val & CALL_LONG) != 0
32581 && GET_CODE (func_desc) == SYMBOL_REF)
32583 /* FIXME: the longcall opt should not hang off picsymbol stubs. */
32584 if (darwin_picsymbol_stubs && TARGET_32BIT)
32585 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
32586 else
32588 /* The linker is capable of doing this, but the user explicitly
32589 asked for -mlongcall, so we'll do the 'normal' version. */
32590 func = rs6000_longcall_ref (func_desc, NULL_RTX);
32591 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
32595 /* Handle indirect calls. */
32596 if (GET_CODE (func) != SYMBOL_REF)
32598 func = force_reg (Pmode, func);
32600 /* Indirect calls via CTR are strongly preferred over indirect
32601 calls via LR, and are required for indirect sibcalls, so move
32602 the address there. */
32603 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32604 emit_move_insn (func_addr, func);
32606 else
32607 func_addr = func;
32609 /* Create the call. */
32610 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32611 if (value != NULL_RTX)
32612 call[0] = gen_rtx_SET (value, call[0]);
32614 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
32616 if (sibcall)
32617 call[2] = simple_return_rtx;
32618 else
32619 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32621 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
32622 insn = emit_call_insn (insn);
32623 /* Now that we have the debug info in the insn, we can set up the branch island
32624 if we're using one. */
32625 if (make_island)
32627 tree funname = get_identifier (XSTR (func_desc, 0));
32629 if (no_previous_def (funname))
32631 rtx label_rtx = gen_label_rtx ();
32632 char *label_buf, temp_buf[256];
32633 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32634 CODE_LABEL_NUMBER (label_rtx));
32635 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32636 tree labelname = get_identifier (label_buf);
32637 add_compiler_branch_island (labelname, funname,
32638 insn_line ((const rtx_insn*)insn));
32642 #endif
32644 void
32645 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
32646 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
32648 #if TARGET_MACHO
32649 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
32650 #else
32651 gcc_unreachable();
32652 #endif
32656 void
32657 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
32658 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
32660 #if TARGET_MACHO
32661 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
32662 #else
32663 gcc_unreachable();
32664 #endif
32667 /* Return whether we should generate PC-relative code for FNDECL. */
32668 bool
32669 rs6000_fndecl_pcrel_p (const_tree fndecl)
32671 if (DEFAULT_ABI != ABI_ELFv2)
32672 return false;
32674 struct cl_target_option *opts = target_opts_for_fn (fndecl);
32676 return ((opts->x_rs6000_isa_flags & OPTION_MASK_PCREL) != 0
32677 && TARGET_CMODEL == CMODEL_MEDIUM);
32680 /* Return whether we should generate PC-relative code for *FN. */
32681 bool
32682 rs6000_pcrel_p (struct function *fn)
32684 if (DEFAULT_ABI != ABI_ELFv2)
32685 return false;
32687 /* Optimize usual case. */
32688 if (fn == cfun)
32689 return ((rs6000_isa_flags & OPTION_MASK_PCREL) != 0
32690 && TARGET_CMODEL == CMODEL_MEDIUM);
32692 return rs6000_fndecl_pcrel_p (fn->decl);
32695 #ifdef HAVE_GAS_HIDDEN
32696 # define USE_HIDDEN_LINKONCE 1
32697 #else
32698 # define USE_HIDDEN_LINKONCE 0
32699 #endif
32701 /* Fills in the label name that should be used for a 476 link stack thunk. */
32703 void
32704 get_ppc476_thunk_name (char name[32])
32706 gcc_assert (TARGET_LINK_STACK);
32708 if (USE_HIDDEN_LINKONCE)
32709 sprintf (name, "__ppc476.get_thunk");
32710 else
32711 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32714 /* This function emits the simple thunk routine that is used to preserve
32715 the link stack on the 476 cpu. */
32717 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32718 static void
32719 rs6000_code_end (void)
32721 char name[32];
32722 tree decl;
32724 if (!TARGET_LINK_STACK)
32725 return;
32727 get_ppc476_thunk_name (name);
32729 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32730 build_function_type_list (void_type_node, NULL_TREE));
32731 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32732 NULL_TREE, void_type_node);
32733 TREE_PUBLIC (decl) = 1;
32734 TREE_STATIC (decl) = 1;
32736 #if RS6000_WEAK
32737 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
32739 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
32740 targetm.asm_out.unique_section (decl, 0);
32741 switch_to_section (get_named_section (decl, NULL, 0));
32742 DECL_WEAK (decl) = 1;
32743 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32744 targetm.asm_out.globalize_label (asm_out_file, name);
32745 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32746 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32748 else
32749 #endif
32751 switch_to_section (text_section);
32752 ASM_OUTPUT_LABEL (asm_out_file, name);
32755 DECL_INITIAL (decl) = make_node (BLOCK);
32756 current_function_decl = decl;
32757 allocate_struct_function (decl, false);
32758 init_function_start (decl);
32759 first_function_block_is_cold = false;
32760 /* Make sure unwind info is emitted for the thunk if needed. */
32761 final_start_function (emit_barrier (), asm_out_file, 1);
32763 fputs ("\tblr\n", asm_out_file);
32765 final_end_function ();
32766 init_insn_lengths ();
32767 free_after_compilation (cfun);
32768 set_cfun (NULL);
32769 current_function_decl = NULL;
32772 /* Add r30 to hard reg set if the prologue sets it up and it is not
32773 pic_offset_table_rtx. */
32775 static void
32776 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32778 if (!TARGET_SINGLE_PIC_BASE
32779 && TARGET_TOC
32780 && TARGET_MINIMAL_TOC
32781 && !constant_pool_empty_p ())
32782 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32783 if (cfun->machine->split_stack_argp_used)
32784 add_to_hard_reg_set (&set->set, Pmode, 12);
32786 /* Make sure the hard reg set doesn't include r2, which was possibly added
32787 via PIC_OFFSET_TABLE_REGNUM. */
32788 if (TARGET_TOC)
32789 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
32793 /* Helper function for rs6000_split_logical to emit a logical instruction after
32794 splitting the operation into single GPR registers.
32796 DEST is the destination register.
32797 OP1 and OP2 are the input source registers.
32798 CODE is the base operation (AND, IOR, XOR, NOT).
32799 MODE is the machine mode.
32800 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32801 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32802 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32804 static void
32805 rs6000_split_logical_inner (rtx dest,
32806 rtx op1,
32807 rtx op2,
32808 enum rtx_code code,
32809 machine_mode mode,
32810 bool complement_final_p,
32811 bool complement_op1_p,
32812 bool complement_op2_p)
32814 rtx bool_rtx;
32816 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32817 if (op2 && CONST_INT_P (op2)
32818 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32819 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32821 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32822 HOST_WIDE_INT value = INTVAL (op2) & mask;
32824 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32825 if (code == AND)
32827 if (value == 0)
32829 emit_insn (gen_rtx_SET (dest, const0_rtx));
32830 return;
32833 else if (value == mask)
32835 if (!rtx_equal_p (dest, op1))
32836 emit_insn (gen_rtx_SET (dest, op1));
32837 return;
32841 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32842 into separate ORI/ORIS or XORI/XORIS instructions. */
32843 else if (code == IOR || code == XOR)
32845 if (value == 0)
32847 if (!rtx_equal_p (dest, op1))
32848 emit_insn (gen_rtx_SET (dest, op1));
32849 return;
32854 if (code == AND && mode == SImode
32855 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32857 emit_insn (gen_andsi3 (dest, op1, op2));
32858 return;
32861 if (complement_op1_p)
32862 op1 = gen_rtx_NOT (mode, op1);
32864 if (complement_op2_p)
32865 op2 = gen_rtx_NOT (mode, op2);
32867 /* For canonical RTL, if only one arm is inverted it is the first. */
32868 if (!complement_op1_p && complement_op2_p)
32869 std::swap (op1, op2);
32871 bool_rtx = ((code == NOT)
32872 ? gen_rtx_NOT (mode, op1)
32873 : gen_rtx_fmt_ee (code, mode, op1, op2));
32875 if (complement_final_p)
32876 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32878 emit_insn (gen_rtx_SET (dest, bool_rtx));
32881 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32882 operations are split immediately during RTL generation to allow for more
32883 optimizations of the AND/IOR/XOR.
32885 OPERANDS is an array containing the destination and two input operands.
32886 CODE is the base operation (AND, IOR, XOR, NOT).
32887 MODE is the machine mode.
32888 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32889 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32890 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32894 static void
32895 rs6000_split_logical_di (rtx operands[3],
32896 enum rtx_code code,
32897 bool complement_final_p,
32898 bool complement_op1_p,
32899 bool complement_op2_p)
32901 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32902 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32903 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32904 enum hi_lo { hi = 0, lo = 1 };
32905 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32906 size_t i;
32908 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32909 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32910 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32911 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32913 if (code == NOT)
32914 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32915 else
32917 if (!CONST_INT_P (operands[2]))
32919 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32920 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32922 else
32924 HOST_WIDE_INT value = INTVAL (operands[2]);
32925 HOST_WIDE_INT value_hi_lo[2];
32927 gcc_assert (!complement_final_p);
32928 gcc_assert (!complement_op1_p);
32929 gcc_assert (!complement_op2_p);
32931 value_hi_lo[hi] = value >> 32;
32932 value_hi_lo[lo] = value & lower_32bits;
32934 for (i = 0; i < 2; i++)
32936 HOST_WIDE_INT sub_value = value_hi_lo[i];
32938 if (sub_value & sign_bit)
32939 sub_value |= upper_32bits;
32941 op2_hi_lo[i] = GEN_INT (sub_value);
32943 /* If this is an AND instruction, check to see if we need to load
32944 the value in a register. */
32945 if (code == AND && sub_value != -1 && sub_value != 0
32946 && !and_operand (op2_hi_lo[i], SImode))
32947 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32952 for (i = 0; i < 2; i++)
32954 /* Split large IOR/XOR operations. */
32955 if ((code == IOR || code == XOR)
32956 && CONST_INT_P (op2_hi_lo[i])
32957 && !complement_final_p
32958 && !complement_op1_p
32959 && !complement_op2_p
32960 && !logical_const_operand (op2_hi_lo[i], SImode))
32962 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32963 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32964 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32965 rtx tmp = gen_reg_rtx (SImode);
32967 /* Make sure the constant is sign extended. */
32968 if ((hi_16bits & sign_bit) != 0)
32969 hi_16bits |= upper_32bits;
32971 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32972 code, SImode, false, false, false);
32974 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
32975 code, SImode, false, false, false);
32977 else
32978 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
32979 code, SImode, complement_final_p,
32980 complement_op1_p, complement_op2_p);
32983 return;
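/* For example (illustrative), on a 32-bit target

     long long x = y ^ 0x123456789abcdef0LL;

   is handled one SImode half at a time here; neither 0x12345678 nor
   0x9abcdef0 is a logical_const_operand, so each half is further split
   into an XORIS of its upper 16 bits followed by an XORI of its lower
   16 bits via the two rs6000_split_logical_inner calls above.  */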
32986 /* Split the insns that make up boolean operations operating on multiple GPR
32987 registers. The boolean MD patterns ensure that the inputs either are
32988 exactly the same as the output registers, or there is no overlap.
32990 OPERANDS is an array containing the destination and two input operands.
32991 CODE is the base operation (AND, IOR, XOR, NOT).
32992 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32993 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32994 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32996 void
32997 rs6000_split_logical (rtx operands[3],
32998 enum rtx_code code,
32999 bool complement_final_p,
33000 bool complement_op1_p,
33001 bool complement_op2_p)
33003 machine_mode mode = GET_MODE (operands[0]);
33004 machine_mode sub_mode;
33005 rtx op0, op1, op2;
33006 int sub_size, regno0, regno1, nregs, i;
33008 /* If this is DImode, use the specialized version that can run before
33009 register allocation. */
33010 if (mode == DImode && !TARGET_POWERPC64)
33012 rs6000_split_logical_di (operands, code, complement_final_p,
33013 complement_op1_p, complement_op2_p);
33014 return;
33017 op0 = operands[0];
33018 op1 = operands[1];
33019 op2 = (code == NOT) ? NULL_RTX : operands[2];
33020 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
33021 sub_size = GET_MODE_SIZE (sub_mode);
33022 regno0 = REGNO (op0);
33023 regno1 = REGNO (op1);
33025 gcc_assert (reload_completed);
33026 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33027 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33029 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
33030 gcc_assert (nregs > 1);
33032 if (op2 && REG_P (op2))
33033 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
33035 for (i = 0; i < nregs; i++)
33037 int offset = i * sub_size;
33038 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
33039 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
33040 rtx sub_op2 = ((code == NOT)
33041 ? NULL_RTX
33042 : simplify_subreg (sub_mode, op2, mode, offset));
33044 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
33045 complement_final_p, complement_op1_p,
33046 complement_op2_p);
33049 return;
33053 /* Return true if the peephole2 can combine an addis instruction and a
33054 D-form load whose address uses the addis result, so that the pair can be
33055 fused together on a power8. */
33057 bool
33058 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
33059 rtx addis_value, /* addis value. */
33060 rtx target, /* target register that is loaded. */
33061 rtx mem) /* bottom part of the memory addr. */
33063 rtx addr;
33064 rtx base_reg;
33066 /* Validate arguments. */
33067 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
33068 return false;
33070 if (!base_reg_operand (target, GET_MODE (target)))
33071 return false;
33073 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
33074 return false;
33076 /* Allow sign/zero extension. */
33077 if (GET_CODE (mem) == ZERO_EXTEND
33078 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33079 mem = XEXP (mem, 0);
33081 if (!MEM_P (mem))
33082 return false;
33084 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
33085 return false;
33087 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33088 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33089 return false;
33091 /* Validate that the register used to load the high value is either the
33092 register being loaded, or we can safely replace its use.
33094 This function is only called from the peephole2 pass and we assume that
33095 there are 2 instructions in the peephole (addis and load), so we want to
33096 check if the target register was not used in the memory address and the
33097 register to hold the addis result is dead after the peephole. */
33098 if (REGNO (addis_reg) != REGNO (target))
33100 if (reg_mentioned_p (target, mem))
33101 return false;
33103 if (!peep2_reg_dead_p (2, addis_reg))
33104 return false;
33106 /* If the target register being loaded is the stack pointer, we must
33107 avoid loading any other value into it, even temporarily. */
33108 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33109 return false;
33112 base_reg = XEXP (addr, 0);
33113 return REGNO (addis_reg) == REGNO (base_reg);
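/* The pair this predicate accepts looks like the following sequence
   (an illustrative TOC-relative example):

     addis 9,2,.LC0@toc@ha
     lwz 9,.LC0@toc@l(9)

   which a power8 can fuse because the base register of the load is the
   register the addis has just set.  */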
33116 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33117 sequence. We adjust the addis register to use the target register. If the
33118 load sign extends, we adjust the code to do a zero-extending load followed
33119 by an explicit sign extension, since the fusion only covers zero-extending
33120 loads.
33122 The operands are:
33123 operands[0] register set with addis (to be replaced with target)
33124 operands[1] value set via addis
33125 operands[2] target register being loaded
33126 operands[3] D-form memory reference using operands[0]. */
33128 void
33129 expand_fusion_gpr_load (rtx *operands)
33131 rtx addis_value = operands[1];
33132 rtx target = operands[2];
33133 rtx orig_mem = operands[3];
33134 rtx new_addr, new_mem, orig_addr, offset;
33135 enum rtx_code plus_or_lo_sum;
33136 machine_mode target_mode = GET_MODE (target);
33137 machine_mode extend_mode = target_mode;
33138 machine_mode ptr_mode = Pmode;
33139 enum rtx_code extend = UNKNOWN;
33141 if (GET_CODE (orig_mem) == ZERO_EXTEND
33142 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33144 extend = GET_CODE (orig_mem);
33145 orig_mem = XEXP (orig_mem, 0);
33146 target_mode = GET_MODE (orig_mem);
33149 gcc_assert (MEM_P (orig_mem));
33151 orig_addr = XEXP (orig_mem, 0);
33152 plus_or_lo_sum = GET_CODE (orig_addr);
33153 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33155 offset = XEXP (orig_addr, 1);
33156 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
33157 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
33159 if (extend != UNKNOWN)
33160 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33162 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
33163 UNSPEC_FUSION_GPR);
33164 emit_insn (gen_rtx_SET (target, new_mem));
33166 if (extend == SIGN_EXTEND)
33168 int sub_off = ((BYTES_BIG_ENDIAN)
33169 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33170 : 0);
33171 rtx sign_reg
33172 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33174 emit_insn (gen_rtx_SET (target,
33175 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33178 return;
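/* E.g. (illustrative) a fused sign-extending halfword load is emitted as a
   zero-extending load wrapped in UNSPEC_FUSION_GPR, followed by an explicit
   sign_extend of the loaded low part, since only zero-extending loads
   participate in the fusion.  */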
/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value)
{
  rtx fuse_ops[10];
  const char *addis_str = NULL;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);	/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);	/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  output_asm_insn (addis_str, fuse_ops);
}
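
/* For example (hypothetical operands): a HIGH of a TOC-relative unspec,
   i.e. (high (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)), is matched
   by the HIGH arm above and, on ELF targets with target = r10, prints as

	addis 10,2,x@toc@ha

   while a plain 16-bit-shifted constant satisfying constraint L prints with
   "lis" instead.  */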
/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

static void
emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}
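
/* As a concrete example (hypothetical values): with load_reg = r10,
   addis_reg = r10, offset = (const_int 8) and insn_str = "lwz", the
   const_int arm above formats and emits

	lwz 10,8(10)

   completing the addis/load fused pair.  */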
/* Given an address, convert it into the addis and load offset parts.
   Addresses created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
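
/* For instance, given (plus (reg 9) (const_int 8)) this returns
   *p_hi = (reg 9) and *p_lo = (const_int 8); the hi part feeds
   emit_fusion_addis and the lo part becomes the D-form load offset.  */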
/* Return a string to fuse an addis instruction with a GPR load into the same
   register that the addis instruction set up.  The address used is the
   logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      load_str = "lbz";
      break;

    case E_HImode:
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value);

  /* Emit the D-form load instruction.  */
  emit_fusion_load (target, target, load_offset, load_str);

  return "";
}
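
/* Putting the pieces together, for a (hypothetical) SImode TOC-relative
   load fused into r10 on ELF, the two calls above emit

	addis 10,2,x@toc@ha
	lwz 10,x@toc@l(10)

   and "" is returned so the caller's output template contributes nothing
   further.  */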
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var):

     fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that clears the entire lower word of the FPSCR, i.e. the exception
     status, enable, and rounding mode bits.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var):

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)&fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
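
/* For reference (this is the generic contract of the
   TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook, not something specific to the
   trees built above): the hold/clear/update sequences are wrapped by the
   middle end around an atomic floating-point compound assignment such as

     _Atomic double d;  ...  d += 1.0;

   so that FP exceptions raised by discarded compare-and-swap iterations do
   not leak into the floating-point environment.  */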
void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
					   GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
					   GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
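
/* Net effect (an informal reading, not part of the original comments): the
   sequence above computes

     dst = { (float) src1[0], (float) src1[1],
	     (float) src2[0], (float) src2[1] }

   with the BE/LE branches only compensating for element numbering; the
   double-to-single conversions leave each narrowed value in an even word
   slot, and vmrgew then picks the even words of the two intermediates.  */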
void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
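
/* This is the same dance as rs6000_generate_float2_double_code, but for
   64-bit integer inputs: the only difference is choosing the signed
   (xvcvsxdsp) or unsigned (xvcvuxdsp) conversion, e.g. for the integer
   overloads of the vec_float2 style built-ins on vector (unsigned) long
   long.  */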
void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
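
/* Analogously (informal reading), this produces

     dst = { (int) src1[0], (int) src1[1], (int) src2[0], (int) src2[1] }

   for the vec_signed2 / vec_unsigned2 style built-ins that narrow two
   vector doubles into one vector of 32-bit integers.  */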
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
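
/* Concretely, this means a reciprocal square root expansion (e.g. for
   1.0f / sqrtf (x) under -ffast-math) is only advertised when optimizing
   for speed and automatic use of the rsqrt estimate is enabled for the
   mode; at -Os the optab is reported unsupported and the plain
   sqrt-and-divide sequence is kept.  */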
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}
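
/* E.g. a string constant gets its alignment raised to a word boundary,
   presumably so it can be accessed a word at a time by string and
   block-move expansions; the MAX means a stricter requested alignment is
   never lowered.  */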
/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}
/* Create an alias for a mangled name where we have changed the mangling (in
   GCC 8.1, we used U10__float128, and now we use u9__ieee128).  This is
   called via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME.  */

#if TARGET_ELF && RS6000_WEAK
static void
rs6000_globalize_decl_name (FILE *stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  targetm.asm_out.globalize_label (stream, name);

  if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
    {
      tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
      const char *old_name;

      ieee128_mangling_gcc_8_1 = true;
      lang_hooks.set_decl_assembler_name (decl);
      old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
      ieee128_mangling_gcc_8_1 = false;

      if (strcmp (name, old_name) != 0)
	{
	  fprintf (stream, "\t.weak %s\n", old_name);
	  fprintf (stream, "\t.set %s,%s\n", old_name, name);
	}
    }
}
#endif
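
/* Illustration (hypothetical C++ symbol): for void f (__float128), GCC 8.1
   produced _Z1fU10__float128 while the current mangling is _Z1fu9__ieee128,
   so the hook above would emit

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128

   letting objects built against the old mangling still link.  */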
/* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
   function names from <foo>l to <foo>f128 if the default long double type is
   IEEE 128-bit.  Typically, with the C and C++ languages, the standard math.h
   include file switches the names on systems that support long double as IEEE
   128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
   In the future, glibc will export names like __ieee128_sinf128 and we can
   switch to using those instead of using sinf128, which pollutes the user's
   namespace.

   This will switch the names for Fortran math functions as well (Fortran
   doesn't use math.h).  However, Fortran needs other changes to the compiler
   and library before you can switch the real*16 type at compile time.

   We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name.  We
   only do this if the default is that long double is IBM extended double, and
   the user asked for IEEE 128-bit.  */

static tree
rs6000_mangle_decl_assembler_name (tree decl, tree id)
{
  if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
    {
      size_t len = IDENTIFIER_LENGTH (id);
      const char *name = IDENTIFIER_POINTER (id);

      if (name[len - 1] == 'l')
	{
	  bool uses_ieee128_p = false;
	  tree type = TREE_TYPE (decl);
	  machine_mode ret_mode = TYPE_MODE (type);

	  /* See if the function returns an IEEE 128-bit floating point type
	     or complex type.  */
	  if (ret_mode == TFmode || ret_mode == TCmode)
	    uses_ieee128_p = true;
	  else
	    {
	      function_args_iterator args_iter;
	      tree arg;

	      /* See if the function passes an IEEE 128-bit floating point
		 type or complex type.  */
	      FOREACH_FUNCTION_ARGS (type, arg, args_iter)
		{
		  machine_mode arg_mode = TYPE_MODE (arg);
		  if (arg_mode == TFmode || arg_mode == TCmode)
		    {
		      uses_ieee128_p = true;
		      break;
		    }
		}
	    }

	  /* If we passed or returned an IEEE 128-bit floating point type,
	     change the name.  */
	  if (uses_ieee128_p)
	    {
	      char *name2 = (char *) alloca (len + 4);
	      memcpy (name2, name, len - 1);
	      strcpy (name2 + len - 1, "f128");
	      id = get_identifier (name2);
	    }
	}
    }

  return id;
}
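
/* For example, with -mabi=ieeelongdouble on a target whose default long
   double is IBM extended double, a call to __builtin_sinl (whose "sinl"
   name ends in 'l' and whose argument and return are TFmode) is re-mangled
   here to "sinf128".  */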
/* Predict whether the given loop in gimple will be transformed in the RTL
   doloop_optimize pass.  */

static bool
rs6000_predict_doloop_p (struct loop *loop)
{
  gcc_assert (loop);

  /* On rs6000, targetm.can_use_doloop_p is actually
     can_use_doloop_if_innermost.  Just ensure the loop is innermost.  */
  if (loop->inner != NULL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Predict doloop failure due to"
			    " loop nesting.\n");
      return false;
    }

  return true;
}
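
/* For instance, in a doubly nested loop only the inner loop is predicted to
   become a hardware count-register (bdnz) loop; exposing this prediction to
   the gimple passes keeps their cost decisions consistent with what
   doloop_optimize will actually do later at the RTL level.  */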
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"