/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call to
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
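
/* Example (a hedged usage sketch; the attribute strings follow the target
   attribute syntax): this table backs the ifunc resolver emitted for the
   target_clones attribute, e.g.

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     double scale (double x) { return x * 2.0; }

   where the resolver selects the power9 clone when
   __builtin_cpu_supports ("arch_3_00") holds, and so on down the table.  */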
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or in legitimate
   address checking.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */
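
/* Example (worked value): a mode that is valid in a register class and
   supports both indexed and offset addressing there has an address mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) == 0x0d.  */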
/* Per-mode reload information, including the reload insns to use and the
   masks of valid addressing modes for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	/* INSN for fusing gpr ADDIS/loads.  */
				/* INSNs for fusing addi with loads
				   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
				/* INSNs for fusing addis with loads
				   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
  bool fused_toc;		/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
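
/* Example (informal): with ISA 3.0 the lxv/stxv instructions use a DQ-form
   displacement whose low 4 bits must be zero, so for a mode with
   RELOAD_REG_QUAD_OFFSET set an offset of 16 is representable while an
   offset of 8 is not.  */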
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function *rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",	  1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",	  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct",  0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
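
/* Example (worked values): ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is
   0x80000000 and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) is 0x20000000,
   matching the convention that %v0 occupies the most significant VRSAVE
   bit.  */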
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1704 #undef TARGET_INIT_BUILTINS
1705 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1706 #undef TARGET_BUILTIN_DECL
1707 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1709 #undef TARGET_FOLD_BUILTIN
1710 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1711 #undef TARGET_GIMPLE_FOLD_BUILTIN
1712 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1714 #undef TARGET_EXPAND_BUILTIN
1715 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1717 #undef TARGET_MANGLE_TYPE
1718 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1720 #undef TARGET_INIT_LIBFUNCS
1721 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1723 #if TARGET_MACHO
1724 #undef TARGET_BINDS_LOCAL_P
1725 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1726 #endif
1728 #undef TARGET_MS_BITFIELD_LAYOUT_P
1729 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1731 #undef TARGET_ASM_OUTPUT_MI_THUNK
1732 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1734 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1735 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1737 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1738 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1740 #undef TARGET_REGISTER_MOVE_COST
1741 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1742 #undef TARGET_MEMORY_MOVE_COST
1743 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1744 #undef TARGET_CANNOT_COPY_INSN_P
1745 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1746 #undef TARGET_RTX_COSTS
1747 #define TARGET_RTX_COSTS rs6000_rtx_costs
1748 #undef TARGET_ADDRESS_COST
1749 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1750 #undef TARGET_INSN_COST
1751 #define TARGET_INSN_COST rs6000_insn_cost
1753 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1754 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1756 #undef TARGET_PROMOTE_FUNCTION_MODE
1757 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1759 #undef TARGET_RETURN_IN_MEMORY
1760 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1762 #undef TARGET_RETURN_IN_MSB
1763 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1765 #undef TARGET_SETUP_INCOMING_VARARGS
1766 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1768 /* Always strict argument naming on rs6000. */
1769 #undef TARGET_STRICT_ARGUMENT_NAMING
1770 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1771 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1772 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1773 #undef TARGET_SPLIT_COMPLEX_ARG
1774 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1775 #undef TARGET_MUST_PASS_IN_STACK
1776 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1777 #undef TARGET_PASS_BY_REFERENCE
1778 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1779 #undef TARGET_ARG_PARTIAL_BYTES
1780 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1781 #undef TARGET_FUNCTION_ARG_ADVANCE
1782 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1783 #undef TARGET_FUNCTION_ARG
1784 #define TARGET_FUNCTION_ARG rs6000_function_arg
1785 #undef TARGET_FUNCTION_ARG_PADDING
1786 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1787 #undef TARGET_FUNCTION_ARG_BOUNDARY
1788 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1790 #undef TARGET_BUILD_BUILTIN_VA_LIST
1791 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1793 #undef TARGET_EXPAND_BUILTIN_VA_START
1794 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1796 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1797 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1799 #undef TARGET_EH_RETURN_FILTER_MODE
1800 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1802 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1803 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1805 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1806 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1808 #undef TARGET_FLOATN_MODE
1809 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1811 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1812 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1814 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1815 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1817 #undef TARGET_MD_ASM_ADJUST
1818 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1820 #undef TARGET_OPTION_OVERRIDE
1821 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1823 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1825 rs6000_builtin_vectorized_function
1827 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1828 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1829 rs6000_builtin_md_vectorized_function
1831 #undef TARGET_STACK_PROTECT_GUARD
1832 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1834 #if !TARGET_MACHO
1835 #undef TARGET_STACK_PROTECT_FAIL
1836 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1837 #endif
1839 #ifdef HAVE_AS_TLS
1840 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1841 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1842 #endif
1844 /* Use a 32-bit anchor range. This leads to sequences like:
1846 addis tmp,anchor,high
1847 add dest,tmp,low
1849 where tmp itself acts as an anchor, and can be shared between
1850 accesses to the same 64k page. */
1851 #undef TARGET_MIN_ANCHOR_OFFSET
1852 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1853 #undef TARGET_MAX_ANCHOR_OFFSET
1854 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
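/* Worked example of the anchored sequence in the comment above, assuming
   a low part with the sign bit clear: to load from anchor+0x12345678 the
   compiler can emit

       addis tmp,anchor,0x1234
       lwz   dest,0x5678(tmp)

   and nearby accesses within the same 64k page reuse TMP.  */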
1855 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1856 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1857 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1858 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1860 #undef TARGET_BUILTIN_RECIPROCAL
1861 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1863 #undef TARGET_SECONDARY_RELOAD
1864 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1865 #undef TARGET_SECONDARY_MEMORY_NEEDED
1866 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1867 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1868 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1870 #undef TARGET_LEGITIMATE_ADDRESS_P
1871 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1873 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1874 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1876 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1877 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1879 #undef TARGET_CAN_ELIMINATE
1880 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1882 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1883 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1885 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1886 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1888 #undef TARGET_TRAMPOLINE_INIT
1889 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1891 #undef TARGET_FUNCTION_VALUE
1892 #define TARGET_FUNCTION_VALUE rs6000_function_value
1894 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1895 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1897 #undef TARGET_OPTION_SAVE
1898 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1900 #undef TARGET_OPTION_RESTORE
1901 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1903 #undef TARGET_OPTION_PRINT
1904 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1906 #undef TARGET_CAN_INLINE_P
1907 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1909 #undef TARGET_SET_CURRENT_FUNCTION
1910 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1912 #undef TARGET_LEGITIMATE_CONSTANT_P
1913 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1915 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1916 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1918 #undef TARGET_CAN_USE_DOLOOP_P
1919 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1921 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1922 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1924 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1925 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1926 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1927 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1928 #undef TARGET_UNWIND_WORD_MODE
1929 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1931 #undef TARGET_OFFLOAD_OPTIONS
1932 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1934 #undef TARGET_C_MODE_FOR_SUFFIX
1935 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1937 #undef TARGET_INVALID_BINARY_OP
1938 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1940 #undef TARGET_OPTAB_SUPPORTED_P
1941 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1943 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1944 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1946 #undef TARGET_COMPARE_VERSION_PRIORITY
1947 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1949 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1950 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1951 rs6000_generate_version_dispatcher_body
1953 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1954 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1955 rs6000_get_function_versions_dispatcher
1957 #undef TARGET_OPTION_FUNCTION_VERSIONS
1958 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1960 #undef TARGET_HARD_REGNO_NREGS
1961 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1962 #undef TARGET_HARD_REGNO_MODE_OK
1963 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1965 #undef TARGET_MODES_TIEABLE_P
1966 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1968 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1969 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1970 rs6000_hard_regno_call_part_clobbered
1972 #undef TARGET_SLOW_UNALIGNED_ACCESS
1973 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1975 #undef TARGET_CAN_CHANGE_MODE_CLASS
1976 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1978 #undef TARGET_CONSTANT_ALIGNMENT
1979 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1981 #undef TARGET_STARTING_FRAME_OFFSET
1982 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1985 /* Processor table. */
1986 struct rs6000_ptt
1988 const char *const name; /* Canonical processor name. */
1989 const enum processor_type processor; /* Processor type enum value. */
1990 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1993 static struct rs6000_ptt const processor_target_table[] =
1995 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1996 #include "rs6000-cpus.def"
1997 #undef RS6000_CPU
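/* Sketch of the expansion above: a line in rs6000-cpus.def such as
   (flags elided here; see that file for the real values)

     RS6000_CPU ("power9", PROCESSOR_POWER9, FLAGS)

   becomes the table entry { "power9", PROCESSOR_POWER9, FLAGS }.  */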
2000 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2001 name is invalid. */
2003 static int
2004 rs6000_cpu_name_lookup (const char *name)
2006 size_t i;
2008 if (name != NULL)
2010 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2011 if (! strcmp (name, processor_target_table[i].name))
2012 return (int)i;
2015 return -1;
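/* Illustrative (hypothetical) use of the lookup, e.g. when processing
   -mcpu=:

     int idx = rs6000_cpu_name_lookup ("power9");
     HOST_WIDE_INT flags
       = (idx >= 0) ? processor_target_table[idx].target_enable : 0;  */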
2019 /* Return number of consecutive hard regs needed starting at reg REGNO
2020 to hold something of mode MODE.
2021 This is ordinarily the length in words of a value of mode MODE
2022 but can be less for certain modes in special long registers.
2024 POWER and PowerPC GPRs hold 32 bits worth;
2025 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2027 static int
2028 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2030 unsigned HOST_WIDE_INT reg_size;
2032 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2033 128-bit floating point that can go in vector registers, which has VSX
2034 memory addressing. */
2035 if (FP_REGNO_P (regno))
2036 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2037 ? UNITS_PER_VSX_WORD
2038 : UNITS_PER_FP_WORD);
2040 else if (ALTIVEC_REGNO_P (regno))
2041 reg_size = UNITS_PER_ALTIVEC_WORD;
2043 else
2044 reg_size = UNITS_PER_WORD;
2046 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
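/* Worked example of the rounding above: a 16-byte V4SImode value needs
   (16 + 4 - 1) / 4 = 4 registers when reg_size is 4 (32-bit GPRs), but
   (16 + 16 - 1) / 16 = 1 register in an AltiVec register.  */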
2049 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2050 MODE. */
2051 static int
2052 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2054 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2056 if (COMPLEX_MODE_P (mode))
2057 mode = GET_MODE_INNER (mode);
2059 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2060 register combinations, and we use PTImode where we need to deal with quad
2061 word memory operations. Don't allow quad words in the argument or frame
2062 pointer registers, just registers 0..31. */
2063 if (mode == PTImode)
2064 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2065 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2066 && ((regno & 1) == 0));
2068 /* The VSX registers that overlap the FPR registers are larger than the
2069 FPRs on non-VSX implementations. Don't allow an item to be split between
2070 a FP register and an Altivec register. Allow TImode in all VSX registers
2071 if the user asked for it. */
2072 if (TARGET_VSX && VSX_REGNO_P (regno)
2073 && (VECTOR_MEM_VSX_P (mode)
2074 || FLOAT128_VECTOR_P (mode)
2075 || reg_addr[mode].scalar_in_vmx_p
2076 || mode == TImode
2077 || (TARGET_VADDUQM && mode == V1TImode)))
2079 if (FP_REGNO_P (regno))
2080 return FP_REGNO_P (last_regno);
2082 if (ALTIVEC_REGNO_P (regno))
2084 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2085 return 0;
2087 return ALTIVEC_REGNO_P (last_regno);
2091 /* The GPRs can hold any mode, but values bigger than one register
2092 cannot go past R31. */
2093 if (INT_REGNO_P (regno))
2094 return INT_REGNO_P (last_regno);
2096 /* The float registers (except for VSX vector modes) can only hold floating
2097 modes and DImode. */
2098 if (FP_REGNO_P (regno))
2100 if (FLOAT128_VECTOR_P (mode))
2101 return false;
2103 if (SCALAR_FLOAT_MODE_P (mode)
2104 && (mode != TDmode || (regno % 2) == 0)
2105 && FP_REGNO_P (last_regno))
2106 return 1;
2108 if (GET_MODE_CLASS (mode) == MODE_INT)
2110 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2111 return 1;
2113 if (TARGET_P8_VECTOR && (mode == SImode))
2114 return 1;
2116 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2117 return 1;
2120 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2121 && PAIRED_VECTOR_MODE (mode))
2122 return 1;
2124 return 0;
2127 /* The CR register can only hold CC modes. */
2128 if (CR_REGNO_P (regno))
2129 return GET_MODE_CLASS (mode) == MODE_CC;
2131 if (CA_REGNO_P (regno))
2132 return mode == Pmode || mode == SImode;
2134 /* AltiVec only in AltiVec registers. */
2135 if (ALTIVEC_REGNO_P (regno))
2136 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2137 || mode == V1TImode);
2139 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2140 registers, and the value must fit within the register set. */
2142 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
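/* A few checks implied by the function above (illustrative, assuming a
   64-bit target): PTImode is OK starting at an even GPR such as r10 but
   not at r11; TDmode in the FPRs must start at an even-numbered
   register; and the CR registers accept only CC modes.  */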
2145 /* Implement TARGET_HARD_REGNO_NREGS. */
2147 static unsigned int
2148 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2150 return rs6000_hard_regno_nregs[mode][regno];
2153 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2155 static bool
2156 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2158 return rs6000_hard_regno_mode_ok_p[mode][regno];
2161 /* Implement TARGET_MODES_TIEABLE_P.
2163 PTImode cannot tie with other modes because PTImode is restricted to even
2164 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2165 57744).
2167 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2168 128-bit floating point on VSX systems ties with other vectors. */
2170 static bool
2171 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2173 if (mode1 == PTImode)
2174 return mode2 == PTImode;
2175 if (mode2 == PTImode)
2176 return false;
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2179 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2180 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2181 return false;
2183 if (SCALAR_FLOAT_MODE_P (mode1))
2184 return SCALAR_FLOAT_MODE_P (mode2);
2185 if (SCALAR_FLOAT_MODE_P (mode2))
2186 return false;
2188 if (GET_MODE_CLASS (mode1) == MODE_CC)
2189 return GET_MODE_CLASS (mode2) == MODE_CC;
2190 if (GET_MODE_CLASS (mode2) == MODE_CC)
2191 return false;
2193 if (PAIRED_VECTOR_MODE (mode1))
2194 return PAIRED_VECTOR_MODE (mode2);
2195 if (PAIRED_VECTOR_MODE (mode2))
2196 return false;
2198 return true;
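/* Examples of the ordering above: V4SFmode ties with V2DImode (both
   AltiVec/VSX vector modes), and, as the comment above notes, IEEE
   128-bit floating point on VSX systems ties with those vectors rather
   than with the scalar float modes; SFmode ties with DFmode but not
   with SImode.  */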
2201 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2203 static bool
2204 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2206 if (TARGET_32BIT
2207 && TARGET_POWERPC64
2208 && GET_MODE_SIZE (mode) > 4
2209 && INT_REGNO_P (regno))
2210 return true;
2212 if (TARGET_VSX
2213 && FP_REGNO_P (regno)
2214 && GET_MODE_SIZE (mode) > 8
2215 && !FLOAT128_2REG_P (mode))
2216 return true;
2218 return false;
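/* Example of the first test above: with -m32 -mpowerpc64, a DImode value
   lives in a single 64-bit GPR, but the 32-bit ABI only preserves the
   low 32 bits across calls, so the register is reported as partially
   clobbered.  */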
2221 /* Print interesting facts about registers. */
2222 static void
2223 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2225 int r, m;
2227 for (r = first_regno; r <= last_regno; ++r)
2229 const char *comma = "";
2230 int len;
2232 if (first_regno == last_regno)
2233 fprintf (stderr, "%s:\t", reg_name);
2234 else
2235 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2237 len = 8;
2238 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2239 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2241 if (len > 70)
2243 fprintf (stderr, ",\n\t");
2244 len = 8;
2245 comma = "";
2248 if (rs6000_hard_regno_nregs[m][r] > 1)
2249 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2250 rs6000_hard_regno_nregs[m][r]);
2251 else
2252 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2254 comma = ", ";
2257 if (call_used_regs[r])
2259 if (len > 70)
2261 fprintf (stderr, ",\n\t");
2262 len = 8;
2263 comma = "";
2266 len += fprintf (stderr, "%s%s", comma, "call-used");
2267 comma = ", ";
2270 if (fixed_regs[r])
2272 if (len > 70)
2274 fprintf (stderr, ",\n\t");
2275 len = 8;
2276 comma = "";
2279 len += fprintf (stderr, "%s%s", comma, "fixed");
2280 comma = ", ";
2283 if (len > 70)
2285 fprintf (stderr, ",\n\t");
2286 comma = "";
2289 len += fprintf (stderr, "%sreg-class = %s", comma,
2290 reg_class_names[(int)rs6000_regno_regclass[r]]);
2291 comma = ", ";
2293 if (len > 70)
2295 fprintf (stderr, ",\n\t");
2296 comma = "";
2299 fprintf (stderr, "%sregno = %d\n", comma, r);
2303 static const char *
2304 rs6000_debug_vector_unit (enum rs6000_vector v)
2306 const char *ret;
2308 switch (v)
2310 case VECTOR_NONE: ret = "none"; break;
2311 case VECTOR_ALTIVEC: ret = "altivec"; break;
2312 case VECTOR_VSX: ret = "vsx"; break;
2313 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2314 case VECTOR_PAIRED: ret = "paired"; break;
2315 case VECTOR_OTHER: ret = "other"; break;
2316 default: ret = "unknown"; break;
2319 return ret;
2322 /* Inner function printing just the address mask for a particular reload
2323 register class. */
2324 DEBUG_FUNCTION char *
2325 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2327 static char ret[8];
2328 char *p = ret;
2330 if ((mask & RELOAD_REG_VALID) != 0)
2331 *p++ = 'v';
2332 else if (keep_spaces)
2333 *p++ = ' ';
2335 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2336 *p++ = 'm';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_INDEXED) != 0)
2341 *p++ = 'i';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2345 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2346 *p++ = 'O';
2347 else if ((mask & RELOAD_REG_OFFSET) != 0)
2348 *p++ = 'o';
2349 else if (keep_spaces)
2350 *p++ = ' ';
2352 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2353 *p++ = '+';
2354 else if (keep_spaces)
2355 *p++ = ' ';
2357 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2358 *p++ = '+';
2359 else if (keep_spaces)
2360 *p++ = ' ';
2362 if ((mask & RELOAD_REG_AND_M16) != 0)
2363 *p++ = '&';
2364 else if (keep_spaces)
2365 *p++ = ' ';
2367 *p = '\0';
2369 return ret;
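/* Hypothetical rendering of the encoding above: a mask with VALID,
   INDEXED, OFFSET and PRE_INCDEC set prints as "v io+  " when
   keep_spaces is true (one column per flag) and as "vio+" when it is
   false.  */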
2372 /* Print the address masks in a human readable fashion. */
2373 DEBUG_FUNCTION void
2374 rs6000_debug_print_mode (ssize_t m)
2376 ssize_t rc;
2377 int spaces = 0;
2378 bool fuse_extra_p;
2380 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2381 for (rc = 0; rc < N_RELOAD_REG; rc++)
2382 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2383 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2385 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2386 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2387 fprintf (stderr, " Reload=%c%c",
2388 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2389 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2390 else
2391 spaces += sizeof (" Reload=sl") - 1;
2393 if (reg_addr[m].scalar_in_vmx_p)
2395 fprintf (stderr, "%*s Upper=y", spaces, "");
2396 spaces = 0;
2398 else
2399 spaces += sizeof (" Upper=y") - 1;
2401 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2402 || reg_addr[m].fused_toc);
2403 if (!fuse_extra_p)
2405 for (rc = 0; rc < N_RELOAD_REG; rc++)
2407 if (rc != RELOAD_REG_ANY)
2409 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2411 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2412 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2413 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2415 fuse_extra_p = true;
2416 break;
2422 if (fuse_extra_p)
2424 fprintf (stderr, "%*s Fuse:", spaces, "");
2425 spaces = 0;
2427 for (rc = 0; rc < N_RELOAD_REG; rc++)
2429 if (rc != RELOAD_REG_ANY)
2431 char load, store;
2433 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2434 load = 'l';
2435 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2436 load = 'L';
2437 else
2438 load = '-';
2440 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2441 store = 's';
2442 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2443 store = 'S';
2444 else
2445 store = '-';
2447 if (load == '-' && store == '-')
2448 spaces += 5;
2449 else
2451 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2452 reload_reg_map[rc].name[0], load, store);
2453 spaces = 0;
2458 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2460 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2461 spaces = 0;
2463 else
2464 spaces += sizeof (" P8gpr") - 1;
2466 if (reg_addr[m].fused_toc)
2468 fprintf (stderr, "%*sToc", (spaces + 1), "");
2469 spaces = 0;
2471 else
2472 spaces += sizeof (" Toc") - 1;
2474 else
2475 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2477 if (rs6000_vector_unit[m] != VECTOR_NONE
2478 || rs6000_vector_mem[m] != VECTOR_NONE)
2480 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2481 spaces, "",
2482 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2483 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2486 fputs ("\n", stderr);
2489 #define DEBUG_FMT_ID "%-32s= "
2490 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2491 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2492 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
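/* Illustrative use of the formats above (hypothetical values):

     fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2");

   prints the key left-justified in a 32-column field, then "= ELFv2".  */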
2494 /* Print various interesting information with -mdebug=reg. */
2495 static void
2496 rs6000_debug_reg_global (void)
2498 static const char *const tf[2] = { "false", "true" };
2499 const char *nl = (const char *)0;
2500 int m;
2501 size_t m1, m2, v;
2502 char costly_num[20];
2503 char nop_num[20];
2504 char flags_buffer[40];
2505 const char *costly_str;
2506 const char *nop_str;
2507 const char *trace_str;
2508 const char *abi_str;
2509 const char *cmodel_str;
2510 struct cl_target_option cl_opts;
2512 /* Modes we want tieable information on. */
2513 static const machine_mode print_tieable_modes[] = {
2514 QImode,
2515 HImode,
2516 SImode,
2517 DImode,
2518 TImode,
2519 PTImode,
2520 SFmode,
2521 DFmode,
2522 TFmode,
2523 IFmode,
2524 KFmode,
2525 SDmode,
2526 DDmode,
2527 TDmode,
2528 V2SImode,
2529 V16QImode,
2530 V8HImode,
2531 V4SImode,
2532 V2DImode,
2533 V1TImode,
2534 V32QImode,
2535 V16HImode,
2536 V8SImode,
2537 V4DImode,
2538 V2TImode,
2539 V2SFmode,
2540 V4SFmode,
2541 V2DFmode,
2542 V8SFmode,
2543 V4DFmode,
2544 CCmode,
2545 CCUNSmode,
2546 CCEQmode,
2549 /* Virtual regs we are interested in. */
2550 static const struct {
2551 int regno; /* register number. */
2552 const char *name; /* register name. */
2553 } virtual_regs[] = {
2554 { STACK_POINTER_REGNUM, "stack pointer:" },
2555 { TOC_REGNUM, "toc: " },
2556 { STATIC_CHAIN_REGNUM, "static chain: " },
2557 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2558 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2559 { ARG_POINTER_REGNUM, "arg pointer: " },
2560 { FRAME_POINTER_REGNUM, "frame pointer:" },
2561 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2562 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2563 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2564 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2565 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2566 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2567 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2568 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2569 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2572 fputs ("\nHard register information:\n", stderr);
2573 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2574 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2575 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2576 LAST_ALTIVEC_REGNO,
2577 "vs");
2578 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2579 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2580 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2581 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2582 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2583 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2585 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2586 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2587 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2589 fprintf (stderr,
2590 "\n"
2591 "d reg_class = %s\n"
2592 "f reg_class = %s\n"
2593 "v reg_class = %s\n"
2594 "wa reg_class = %s\n"
2595 "wb reg_class = %s\n"
2596 "wd reg_class = %s\n"
2597 "we reg_class = %s\n"
2598 "wf reg_class = %s\n"
2599 "wg reg_class = %s\n"
2600 "wh reg_class = %s\n"
2601 "wi reg_class = %s\n"
2602 "wj reg_class = %s\n"
2603 "wk reg_class = %s\n"
2604 "wl reg_class = %s\n"
2605 "wm reg_class = %s\n"
2606 "wo reg_class = %s\n"
2607 "wp reg_class = %s\n"
2608 "wq reg_class = %s\n"
2609 "wr reg_class = %s\n"
2610 "ws reg_class = %s\n"
2611 "wt reg_class = %s\n"
2612 "wu reg_class = %s\n"
2613 "wv reg_class = %s\n"
2614 "ww reg_class = %s\n"
2615 "wx reg_class = %s\n"
2616 "wy reg_class = %s\n"
2617 "wz reg_class = %s\n"
2618 "wA reg_class = %s\n"
2619 "wH reg_class = %s\n"
2620 "wI reg_class = %s\n"
2621 "wJ reg_class = %s\n"
2622 "wK reg_class = %s\n"
2623 "\n",
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2651 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2652 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2653 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2654 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2655 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2657 nl = "\n";
2658 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2659 rs6000_debug_print_mode (m);
2661 fputs ("\n", stderr);
2663 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2665 machine_mode mode1 = print_tieable_modes[m1];
2666 bool first_time = true;
2668 nl = (const char *)0;
2669 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2671 machine_mode mode2 = print_tieable_modes[m2];
2672 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2674 if (first_time)
2676 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2677 nl = "\n";
2678 first_time = false;
2681 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2685 if (!first_time)
2686 fputs ("\n", stderr);
2689 if (nl)
2690 fputs (nl, stderr);
2692 if (rs6000_recip_control)
2694 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2696 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2697 if (rs6000_recip_bits[m])
2699 fprintf (stderr,
2700 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2701 GET_MODE_NAME (m),
2702 (RS6000_RECIP_AUTO_RE_P (m)
2703 ? "auto"
2704 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2705 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2706 ? "auto"
2707 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2710 fputs ("\n", stderr);
2713 if (rs6000_cpu_index >= 0)
2715 const char *name = processor_target_table[rs6000_cpu_index].name;
2716 HOST_WIDE_INT flags
2717 = processor_target_table[rs6000_cpu_index].target_enable;
2719 sprintf (flags_buffer, "-mcpu=%s flags", name);
2720 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2722 else
2723 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2725 if (rs6000_tune_index >= 0)
2727 const char *name = processor_target_table[rs6000_tune_index].name;
2728 HOST_WIDE_INT flags
2729 = processor_target_table[rs6000_tune_index].target_enable;
2731 sprintf (flags_buffer, "-mtune=%s flags", name);
2732 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2734 else
2735 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2737 cl_target_option_save (&cl_opts, &global_options);
2738 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2739 rs6000_isa_flags);
2741 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2742 rs6000_isa_flags_explicit);
2744 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2745 rs6000_builtin_mask);
2747 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2749 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2750 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2752 switch (rs6000_sched_costly_dep)
2754 case max_dep_latency:
2755 costly_str = "max_dep_latency";
2756 break;
2758 case no_dep_costly:
2759 costly_str = "no_dep_costly";
2760 break;
2762 case all_deps_costly:
2763 costly_str = "all_deps_costly";
2764 break;
2766 case true_store_to_load_dep_costly:
2767 costly_str = "true_store_to_load_dep_costly";
2768 break;
2770 case store_to_load_dep_costly:
2771 costly_str = "store_to_load_dep_costly";
2772 break;
2774 default:
2775 costly_str = costly_num;
2776 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2777 break;
2780 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2782 switch (rs6000_sched_insert_nops)
2784 case sched_finish_regroup_exact:
2785 nop_str = "sched_finish_regroup_exact";
2786 break;
2788 case sched_finish_pad_groups:
2789 nop_str = "sched_finish_pad_groups";
2790 break;
2792 case sched_finish_none:
2793 nop_str = "sched_finish_none";
2794 break;
2796 default:
2797 nop_str = nop_num;
2798 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2799 break;
2802 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2804 switch (rs6000_sdata)
2806 default:
2807 case SDATA_NONE:
2808 break;
2810 case SDATA_DATA:
2811 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2812 break;
2814 case SDATA_SYSV:
2815 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2816 break;
2818 case SDATA_EABI:
2819 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2820 break;
2824 switch (rs6000_traceback)
2826 case traceback_default: trace_str = "default"; break;
2827 case traceback_none: trace_str = "none"; break;
2828 case traceback_part: trace_str = "part"; break;
2829 case traceback_full: trace_str = "full"; break;
2830 default: trace_str = "unknown"; break;
2833 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2835 switch (rs6000_current_cmodel)
2837 case CMODEL_SMALL: cmodel_str = "small"; break;
2838 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2839 case CMODEL_LARGE: cmodel_str = "large"; break;
2840 default: cmodel_str = "unknown"; break;
2843 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2845 switch (rs6000_current_abi)
2847 case ABI_NONE: abi_str = "none"; break;
2848 case ABI_AIX: abi_str = "aix"; break;
2849 case ABI_ELFv2: abi_str = "ELFv2"; break;
2850 case ABI_V4: abi_str = "V4"; break;
2851 case ABI_DARWIN: abi_str = "darwin"; break;
2852 default: abi_str = "unknown"; break;
2855 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2857 if (rs6000_altivec_abi)
2858 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2860 if (rs6000_darwin64_abi)
2861 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2863 fprintf (stderr, DEBUG_FMT_S, "single_float",
2864 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2866 fprintf (stderr, DEBUG_FMT_S, "double_float",
2867 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2869 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2870 (TARGET_SOFT_FLOAT ? "true" : "false"));
2872 if (TARGET_LINK_STACK)
2873 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2875 if (TARGET_P8_FUSION)
2877 char options[80];
2879 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2880 if (TARGET_TOC_FUSION)
2881 strcat (options, ", toc");
2883 if (TARGET_P8_FUSION_SIGN)
2884 strcat (options, ", sign");
2886 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2889 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2890 TARGET_SECURE_PLT ? "secure" : "bss");
2891 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2892 aix_struct_return ? "aix" : "sysv");
2893 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2894 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2895 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2896 tf[!!rs6000_align_branch_targets]);
2897 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2898 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2899 rs6000_long_double_type_size);
2900 if (rs6000_long_double_type_size == 128)
2902 fprintf (stderr, DEBUG_FMT_S, "long double type",
2903 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2904 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2905 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2907 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2908 (int)rs6000_sched_restricted_insns_priority);
2909 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2910 (int)END_BUILTINS);
2911 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2912 (int)RS6000_BUILTIN_COUNT);
2914 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2915 (int)TARGET_FLOAT128_ENABLE_TYPE);
2917 if (TARGET_VSX)
2918 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2919 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2921 if (TARGET_DIRECT_MOVE_128)
2922 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2923 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2927 /* Update the addr mask bits in reg_addr to help secondary reload and the
2928 legitimate address support figure out the appropriate addressing to
2929 use. */
2931 static void
2932 rs6000_setup_reg_addr_masks (void)
2934 ssize_t rc, reg, m, nregs;
2935 addr_mask_type any_addr_mask, addr_mask;
2937 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2939 machine_mode m2 = (machine_mode) m;
2940 bool complex_p = false;
2941 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2942 size_t msize;
2944 if (COMPLEX_MODE_P (m2))
2946 complex_p = true;
2947 m2 = GET_MODE_INNER (m2);
2950 msize = GET_MODE_SIZE (m2);
2952 /* SDmode is special in that we want to access it only via REG+REG
2953 addressing on power7 and above, since we want to use the LFIWZX and
2954 STFIWZX instructions to load it. */
2955 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2957 any_addr_mask = 0;
2958 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2960 addr_mask = 0;
2961 reg = reload_reg_map[rc].reg;
2963 /* Can mode values go in the GPR/FPR/Altivec registers? */
2964 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2966 bool small_int_vsx_p = (small_int_p
2967 && (rc == RELOAD_REG_FPR
2968 || rc == RELOAD_REG_VMX));
2970 nregs = rs6000_hard_regno_nregs[m][reg];
2971 addr_mask |= RELOAD_REG_VALID;
2973 /* Indicate if the mode takes more than 1 physical register. If
2974 it takes a single register, indicate it can do REG+REG
2975 addressing. Small integers in VSX registers can only do
2976 REG+REG addressing. */
2977 if (small_int_vsx_p)
2978 addr_mask |= RELOAD_REG_INDEXED;
2979 else if (nregs > 1 || m == BLKmode || complex_p)
2980 addr_mask |= RELOAD_REG_MULTIPLE;
2981 else
2982 addr_mask |= RELOAD_REG_INDEXED;
2984 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2985 addressing. If we allow scalars into Altivec registers,
2986 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2988 For VSX systems, we don't allow update addressing for
2989 DFmode/SFmode if those registers can go in both the
2990 traditional floating point registers and Altivec registers.
2991 The load/store instructions for the Altivec registers do not
2992 have update forms. If we allowed update addressing, it seems
2993 to break IV-OPT code using floating point if the index type is
2994 int instead of long (PR target/81550 and target/84042). */
2996 if (TARGET_UPDATE
2997 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2998 && msize <= 8
2999 && !VECTOR_MODE_P (m2)
3000 && !FLOAT128_VECTOR_P (m2)
3001 && !complex_p
3002 && (m != E_DFmode || !TARGET_VSX)
3003 && (m != E_SFmode || !TARGET_P8_VECTOR)
3004 && !small_int_vsx_p)
3006 addr_mask |= RELOAD_REG_PRE_INCDEC;
3008 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
3009 we don't allow PRE_MODIFY for some multi-register
3010 operations. */
3011 switch (m)
3013 default:
3014 addr_mask |= RELOAD_REG_PRE_MODIFY;
3015 break;
3017 case E_DImode:
3018 if (TARGET_POWERPC64)
3019 addr_mask |= RELOAD_REG_PRE_MODIFY;
3020 break;
3022 case E_DFmode:
3023 case E_DDmode:
3024 if (TARGET_DF_INSN)
3025 addr_mask |= RELOAD_REG_PRE_MODIFY;
3026 break;
3031 /* GPR and FPR registers can do REG+OFFSET addressing, except
3032 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3033 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3034 if ((addr_mask != 0) && !indexed_only_p
3035 && msize <= 8
3036 && (rc == RELOAD_REG_GPR
3037 || ((msize == 8 || m2 == SFmode)
3038 && (rc == RELOAD_REG_FPR
3039 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3040 addr_mask |= RELOAD_REG_OFFSET;
3042 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3043 instructions are enabled. The offset for 128-bit VSX registers is
3044 only 12 bits. While GPRs can handle the full offset range, VSX
3045 registers can only handle the restricted range. */
3046 else if ((addr_mask != 0) && !indexed_only_p
3047 && msize == 16 && TARGET_P9_VECTOR
3048 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3049 || (m2 == TImode && TARGET_VSX)))
3051 addr_mask |= RELOAD_REG_OFFSET;
3052 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3053 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3056 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3057 addressing on 128-bit types. */
3058 if (rc == RELOAD_REG_VMX && msize == 16
3059 && (addr_mask & RELOAD_REG_VALID) != 0)
3060 addr_mask |= RELOAD_REG_AND_M16;
3062 reg_addr[m].addr_mask[rc] = addr_mask;
3063 any_addr_mask |= addr_mask;
3066 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
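/* Sketch of a typical result (hedged; the exact bits depend on the ISA
   flags): on a power9-style target DFmode usually ends up with
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET in the GPR,
   FPR and VMX classes, while the update (PRE_INCDEC/PRE_MODIFY) bits
   stay clear for DFmode once TARGET_VSX is set, as noted above.  */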
3071 /* Initialize the various global tables that are based on register size. */
3072 static void
3073 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3075 ssize_t r, m, c;
3076 int align64;
3077 int align32;
3079 /* Precalculate REGNO_REG_CLASS. */
3080 rs6000_regno_regclass[0] = GENERAL_REGS;
3081 for (r = 1; r < 32; ++r)
3082 rs6000_regno_regclass[r] = BASE_REGS;
3084 for (r = 32; r < 64; ++r)
3085 rs6000_regno_regclass[r] = FLOAT_REGS;
3087 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3088 rs6000_regno_regclass[r] = NO_REGS;
3090 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3091 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3093 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3094 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3095 rs6000_regno_regclass[r] = CR_REGS;
3097 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3098 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3099 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3100 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3101 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3102 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3103 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3104 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3105 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3106 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3108 /* Precalculate register class to simpler reload register class. We don't
3109 need all of the register classes that are combinations of different
3110 classes, just the simple ones that have constraint letters. */
3111 for (c = 0; c < N_REG_CLASSES; c++)
3112 reg_class_to_reg_type[c] = NO_REG_TYPE;
3114 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3115 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3116 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3117 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3118 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3119 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3120 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3121 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3122 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3123 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3125 if (TARGET_VSX)
3127 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3128 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3130 else
3132 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3133 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3136 /* Precalculate the valid memory formats as well as the vector information,
3137 this must be set up before the rs6000_hard_regno_nregs_internal calls
3138 below. */
3139 gcc_assert ((int)VECTOR_NONE == 0);
3140 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3141 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3143 gcc_assert ((int)CODE_FOR_nothing == 0);
3144 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3146 gcc_assert ((int)NO_REGS == 0);
3147 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3149 /* The VSX hardware allows native alignment for vectors; control here whether
3150 the compiler believes it can use native alignment or must still use 128-bit alignment. */
3151 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3153 align64 = 64;
3154 align32 = 32;
3156 else
3158 align64 = 128;
3159 align32 = 128;
3162 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3163 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3164 if (TARGET_FLOAT128_TYPE)
3166 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3167 rs6000_vector_align[KFmode] = 128;
3169 if (FLOAT128_IEEE_P (TFmode))
3171 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3172 rs6000_vector_align[TFmode] = 128;
3176 /* V2DF mode, VSX only. */
3177 if (TARGET_VSX)
3179 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3180 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3181 rs6000_vector_align[V2DFmode] = align64;
3184 /* V4SF mode, either VSX or Altivec. */
3185 if (TARGET_VSX)
3187 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3188 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3189 rs6000_vector_align[V4SFmode] = align32;
3191 else if (TARGET_ALTIVEC)
3193 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3194 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3195 rs6000_vector_align[V4SFmode] = align32;
3198 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3199 and stores. */
3200 if (TARGET_ALTIVEC)
3202 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3203 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3204 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3205 rs6000_vector_align[V4SImode] = align32;
3206 rs6000_vector_align[V8HImode] = align32;
3207 rs6000_vector_align[V16QImode] = align32;
3209 if (TARGET_VSX)
3211 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3212 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3213 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3215 else
3217 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3218 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3219 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3223 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3224 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3225 if (TARGET_VSX)
3227 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3228 rs6000_vector_unit[V2DImode]
3229 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3230 rs6000_vector_align[V2DImode] = align64;
3232 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3233 rs6000_vector_unit[V1TImode]
3234 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3235 rs6000_vector_align[V1TImode] = 128;
3238 /* DFmode, see if we want to use the VSX unit. Memory is handled
3239 differently, so don't set rs6000_vector_mem. */
3240 if (TARGET_VSX)
3242 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3243 rs6000_vector_align[DFmode] = 64;
3246 /* SFmode, see if we want to use the VSX unit. */
3247 if (TARGET_P8_VECTOR)
3249 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3250 rs6000_vector_align[SFmode] = 32;
3253 /* Allow TImode in VSX register and set the VSX memory macros. */
3254 if (TARGET_VSX)
3256 rs6000_vector_mem[TImode] = VECTOR_VSX;
3257 rs6000_vector_align[TImode] = align64;
3260 /* TODO add paired floating point vector support. */
3262 /* Register class constraints for the constraints that depend on compile
3263 switches. When the VSX code was added, different constraints were added
3264 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3265 of the VSX registers are used. The register classes for scalar floating
3266 point types are set based on whether we allow that type into the upper
3267 (Altivec) registers. GCC has register classes to target the Altivec
3268 registers for load/store operations, to select using a VSX memory
3269 operation instead of the traditional floating point operation. The
3270 constraints are:
3272 d - Register class to use with traditional DFmode instructions.
3273 f - Register class to use with traditional SFmode instructions.
3274 v - Altivec register.
3275 wa - Any VSX register.
3276 wc - Reserved to represent individual CR bits (used in LLVM).
3277 wd - Preferred register class for V2DFmode.
3278 wf - Preferred register class for V4SFmode.
3279 wg - Float register for power6x move insns.
3280 wh - FP register for direct move instructions.
3281 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3282 wj - FP or VSX register to hold 64-bit integers for direct moves.
3283 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3284 wl - Float register if we can do 32-bit signed int loads.
3285 wm - VSX register for ISA 2.07 direct move operations.
3286 wn - always NO_REGS.
3287 wr - GPR if 64-bit mode is permitted.
3288 ws - Register class to do ISA 2.06 DF operations.
3289 wt - VSX register for TImode in VSX registers.
3290 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3291 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3292 ww - Register class to do SF conversions in with VSX operations.
3293 wx - Float register if we can do 32-bit int stores.
3294 wy - Register class to do ISA 2.07 SF operations.
3295 wz - Float register if we can do 32-bit unsigned int loads.
3296 wH - Altivec register if SImode is allowed in VSX registers.
3297 wI - VSX register if SImode is allowed in VSX registers.
3298 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3299 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3301 if (TARGET_HARD_FLOAT)
3302 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3304 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3305 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3307 if (TARGET_VSX)
3309 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3310 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3311 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3312 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3313 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3314 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3315 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3318 /* Add conditional constraints based on various options, to allow us to
3319 collapse multiple insn patterns. */
3320 if (TARGET_ALTIVEC)
3321 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3323 if (TARGET_MFPGPR) /* DFmode */
3324 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3326 if (TARGET_LFIWAX)
3327 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3329 if (TARGET_DIRECT_MOVE)
3331 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3332 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3333 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3334 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3335 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3336 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3339 if (TARGET_POWERPC64)
3341 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3342 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3345 if (TARGET_P8_VECTOR) /* SFmode */
3347 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3348 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3349 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3351 else if (TARGET_VSX)
3352 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3354 if (TARGET_STFIWX)
3355 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3357 if (TARGET_LFIWZX)
3358 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3360 if (TARGET_FLOAT128_TYPE)
3362 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3363 if (FLOAT128_IEEE_P (TFmode))
3364 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3367 if (TARGET_P9_VECTOR)
3369 /* Support for new D-form instructions. */
3370 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3372 /* Support for ISA 3.0 (power9) vectors. */
3373 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3376 /* Support for new direct moves (ISA 3.0 + 64bit). */
3377 if (TARGET_DIRECT_MOVE_128)
3378 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3380 /* Support small integers in VSX registers. */
3381 if (TARGET_P8_VECTOR)
3383 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3384 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3385 if (TARGET_P9_VECTOR)
3387 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3388 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3392 /* Set up the reload helper and direct move functions. */
3393 if (TARGET_VSX || TARGET_ALTIVEC)
3395 if (TARGET_64BIT)
3397 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3398 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3399 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3400 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3401 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3402 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3403 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3404 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3405 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3406 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3407 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3408 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3409 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3410 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3411 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3412 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3413 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3414 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3415 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3416 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3418 if (FLOAT128_VECTOR_P (KFmode))
3420 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3421 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3424 if (FLOAT128_VECTOR_P (TFmode))
3426 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3427 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3430 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3431 available. */
3432 if (TARGET_NO_SDMODE_STACK)
3434 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3435 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3438 if (TARGET_VSX)
3440 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3441 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3444 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3446 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3447 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3448 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3449 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3450 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3451 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3452 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3453 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3454 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3456 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3457 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3458 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3459 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3460 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3461 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3462 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3463 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3464 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3466 if (FLOAT128_VECTOR_P (KFmode))
3468 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3469 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3472 if (FLOAT128_VECTOR_P (TFmode))
3474 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3475 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3479 else
3481 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3482 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3483 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3484 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3485 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3486 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3487 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3488 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3489 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3490 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3491 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3492 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3493 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3494 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3495 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3496 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3497 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3498 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3499 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3500 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3502 if (FLOAT128_VECTOR_P (KFmode))
3504 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3505 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3508 if (FLOAT128_IEEE_P (TFmode))
3510 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3511 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3514 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3515 available. */
3516 if (TARGET_NO_SDMODE_STACK)
3518 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3519 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3522 if (TARGET_VSX)
3524 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3525 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3528 if (TARGET_DIRECT_MOVE)
3530 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3531 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3532 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3536 reg_addr[DFmode].scalar_in_vmx_p = true;
3537 reg_addr[DImode].scalar_in_vmx_p = true;
3539 if (TARGET_P8_VECTOR)
3541 reg_addr[SFmode].scalar_in_vmx_p = true;
3542 reg_addr[SImode].scalar_in_vmx_p = true;
3544 if (TARGET_P9_VECTOR)
3546 reg_addr[HImode].scalar_in_vmx_p = true;
3547 reg_addr[QImode].scalar_in_vmx_p = true;
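/* To make the assignments above concrete: scalar_in_vmx_p records which
   scalar modes may usefully be kept in the upper (Altivec) half of the
   VSX register file.  Base VSX allows DFmode/DImode there, the power8
   vector support adds SFmode/SImode, and the power9 vector support adds
   the small integer modes HImode/QImode.  */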
3552 /* Setup the fusion operations. */
3553 if (TARGET_P8_FUSION)
3555 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3556 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3557 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3558 if (TARGET_64BIT)
3559 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
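/* A sketch of what power8 load fusion targets (illustrative assembly,
   not emitted literally by this code): the hardware can fuse an addis
   that builds the high part of an address with the dependent load that
   consumes it, e.g.

       addis 9,2,sym@toc@ha
       lwz   9,sym@toc@l(9)

   The CODE_FOR_fusion_gpr_load_* patterns keep such addis/load pairs
   together so the processor can fuse them.  */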
3562 if (TARGET_P9_FUSION)
3564 struct fuse_insns {
3565 enum machine_mode mode; /* Mode of the fused type. */
3566 enum machine_mode pmode; /* Pointer mode. */
3567 enum rs6000_reload_reg_type rtype; /* Register type. */
3568 enum insn_code load; /* Load insn. */
3569 enum insn_code store; /* Store insn. */
3572 static const struct fuse_insns addis_insns[] = {
3573 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3574 CODE_FOR_fusion_vsx_di_sf_load,
3575 CODE_FOR_fusion_vsx_di_sf_store },
3577 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3578 CODE_FOR_fusion_vsx_si_sf_load,
3579 CODE_FOR_fusion_vsx_si_sf_store },
3581 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3582 CODE_FOR_fusion_vsx_di_df_load,
3583 CODE_FOR_fusion_vsx_di_df_store },
3585 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3586 CODE_FOR_fusion_vsx_si_df_load,
3587 CODE_FOR_fusion_vsx_si_df_store },
3589 { E_DImode, E_DImode, RELOAD_REG_FPR,
3590 CODE_FOR_fusion_vsx_di_di_load,
3591 CODE_FOR_fusion_vsx_di_di_store },
3593 { E_DImode, E_SImode, RELOAD_REG_FPR,
3594 CODE_FOR_fusion_vsx_si_di_load,
3595 CODE_FOR_fusion_vsx_si_di_store },
3597 { E_QImode, E_DImode, RELOAD_REG_GPR,
3598 CODE_FOR_fusion_gpr_di_qi_load,
3599 CODE_FOR_fusion_gpr_di_qi_store },
3601 { E_QImode, E_SImode, RELOAD_REG_GPR,
3602 CODE_FOR_fusion_gpr_si_qi_load,
3603 CODE_FOR_fusion_gpr_si_qi_store },
3605 { E_HImode, E_DImode, RELOAD_REG_GPR,
3606 CODE_FOR_fusion_gpr_di_hi_load,
3607 CODE_FOR_fusion_gpr_di_hi_store },
3609 { E_HImode, E_SImode, RELOAD_REG_GPR,
3610 CODE_FOR_fusion_gpr_si_hi_load,
3611 CODE_FOR_fusion_gpr_si_hi_store },
3613 { E_SImode, E_DImode, RELOAD_REG_GPR,
3614 CODE_FOR_fusion_gpr_di_si_load,
3615 CODE_FOR_fusion_gpr_di_si_store },
3617 { E_SImode, E_SImode, RELOAD_REG_GPR,
3618 CODE_FOR_fusion_gpr_si_si_load,
3619 CODE_FOR_fusion_gpr_si_si_store },
3621 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3622 CODE_FOR_fusion_gpr_di_sf_load,
3623 CODE_FOR_fusion_gpr_di_sf_store },
3625 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3626 CODE_FOR_fusion_gpr_si_sf_load,
3627 CODE_FOR_fusion_gpr_si_sf_store },
3629 { E_DImode, E_DImode, RELOAD_REG_GPR,
3630 CODE_FOR_fusion_gpr_di_di_load,
3631 CODE_FOR_fusion_gpr_di_di_store },
3633 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3634 CODE_FOR_fusion_gpr_di_df_load,
3635 CODE_FOR_fusion_gpr_di_df_store },
3638 machine_mode cur_pmode = Pmode;
3639 size_t i;
3641 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3643 machine_mode xmode = addis_insns[i].mode;
3644 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3646 if (addis_insns[i].pmode != cur_pmode)
3647 continue;
3649 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3650 continue;
3652 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3653 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3655 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3657 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3658 = addis_insns[i].load;
3659 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3660 = addis_insns[i].store;
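/* For example, on a 64-bit target Pmode is DImode, so the loop above
   installs only the table rows whose pmode field is E_DImode; the
   E_SImode rows are skipped by the cur_pmode test.  FPR rows are also
   mirrored into the RELOAD_REG_VMX slot when power9 vector support is
   on, since the ISA 3.0 D-form memory instructions can reach the
   Altivec registers as well.  */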
3665 /* Note which types we support for fusing a TOC setup plus a memory insn. We
3666 only do fused TOCs for medium/large code models. */
3667 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3668 && (TARGET_CMODEL != CMODEL_SMALL))
3670 reg_addr[QImode].fused_toc = true;
3671 reg_addr[HImode].fused_toc = true;
3672 reg_addr[SImode].fused_toc = true;
3673 reg_addr[DImode].fused_toc = true;
3674 if (TARGET_HARD_FLOAT)
3676 if (TARGET_SINGLE_FLOAT)
3677 reg_addr[SFmode].fused_toc = true;
3678 if (TARGET_DOUBLE_FLOAT)
3679 reg_addr[DFmode].fused_toc = true;
3683 /* Precalculate HARD_REGNO_NREGS. */
3684 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3685 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3686 rs6000_hard_regno_nregs[m][r]
3687 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3689 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3690 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3691 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3692 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3693 rs6000_hard_regno_mode_ok_p[m][r] = true;
3695 /* Precalculate CLASS_MAX_NREGS sizes. */
3696 for (c = 0; c < LIM_REG_CLASSES; ++c)
3698 int reg_size;
3700 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3701 reg_size = UNITS_PER_VSX_WORD;
3703 else if (c == ALTIVEC_REGS)
3704 reg_size = UNITS_PER_ALTIVEC_WORD;
3706 else if (c == FLOAT_REGS)
3707 reg_size = UNITS_PER_FP_WORD;
3709 else
3710 reg_size = UNITS_PER_WORD;
3712 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3714 machine_mode m2 = (machine_mode)m;
3715 int reg_size2 = reg_size;
3717 /* TDmode and IBM 128-bit floating point always take 2 registers, even
3718 in VSX. */
3719 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3720 reg_size2 = UNITS_PER_FP_WORD;
3722 rs6000_class_max_nregs[m][c]
3723 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
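/* Worked example of the computation above: V2DFmode is 16 bytes, so a
   VSX class with 16-byte registers holds it in (16 + 16 - 1) / 16 = 1
   register, while GENERAL_REGS on a 32-bit target needs
   (16 + 4 - 1) / 4 = 4 registers.  The FLOAT128_2REG_P override keeps
   IBM extended double and TDmode at (16 + 8 - 1) / 8 = 2 FP registers
   even in the VSX classes.  */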
3727 /* Calculate which modes to automatically generate code for using the
3728 reciprocal divide and square root instructions. In the future, possibly
3729 automatically generate the instructions even if the user did not specify
3730 -mrecip; the double precision reciprocal sqrt estimate on older machines
3731 is not accurate enough. */
3732 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3733 if (TARGET_FRES)
3734 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3735 if (TARGET_FRE)
3736 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3737 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3738 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3739 if (VECTOR_UNIT_VSX_P (V2DFmode))
3740 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3742 if (TARGET_FRSQRTES)
3743 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3744 if (TARGET_FRSQRTE)
3745 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3746 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3747 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3748 if (VECTOR_UNIT_VSX_P (V2DFmode))
3749 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3751 if (rs6000_recip_control)
3753 if (!flag_finite_math_only)
3754 warning (0, "%qs requires %qs or %qs", "-mrecip",
3755 "-ffinite-math-only", "-ffast-math");
3756 if (flag_trapping_math)
3757 warning (0, "%qs requires %qs or %qs", "-mrecip",
3758 "-fno-trapping-math", "-ffast-math");
3759 if (!flag_reciprocal_math)
3760 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3761 "-ffast-math");
3762 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3764 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3765 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3766 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3768 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3769 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3770 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3772 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3773 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3774 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3776 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3777 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3778 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3780 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3781 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3782 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3784 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3785 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3786 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3788 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3789 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3790 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3792 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3793 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3794 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
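/* Usage sketch: given the checks above, the RECIP_* AUTO bits only take
   effect under something like

       gcc -O3 -ffast-math -mrecip ...

   since -ffast-math implies the finite-math, no-trapping-math and
   reciprocal-math conditions that are tested.  */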
3798 /* Update the addr mask bits in reg_addr to help secondary reload and the
3799 legitimate address support figure out the appropriate addressing to
3800 use. */
3801 rs6000_setup_reg_addr_masks ();
3803 if (global_init_p || TARGET_DEBUG_TARGET)
3805 if (TARGET_DEBUG_REG)
3806 rs6000_debug_reg_global ();
3808 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3809 fprintf (stderr,
3810 "SImode variable mult cost = %d\n"
3811 "SImode constant mult cost = %d\n"
3812 "SImode short constant mult cost = %d\n"
3813 "DImode multipliciation cost = %d\n"
3814 "SImode division cost = %d\n"
3815 "DImode division cost = %d\n"
3816 "Simple fp operation cost = %d\n"
3817 "DFmode multiplication cost = %d\n"
3818 "SFmode division cost = %d\n"
3819 "DFmode division cost = %d\n"
3820 "cache line size = %d\n"
3821 "l1 cache size = %d\n"
3822 "l2 cache size = %d\n"
3823 "simultaneous prefetches = %d\n"
3824 "\n",
3825 rs6000_cost->mulsi,
3826 rs6000_cost->mulsi_const,
3827 rs6000_cost->mulsi_const9,
3828 rs6000_cost->muldi,
3829 rs6000_cost->divsi,
3830 rs6000_cost->divdi,
3831 rs6000_cost->fp,
3832 rs6000_cost->dmul,
3833 rs6000_cost->sdiv,
3834 rs6000_cost->ddiv,
3835 rs6000_cost->cache_line_size,
3836 rs6000_cost->l1_cache_size,
3837 rs6000_cost->l2_cache_size,
3838 rs6000_cost->simultaneous_prefetches);
3842 #if TARGET_MACHO
3843 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3845 static void
3846 darwin_rs6000_override_options (void)
3848 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3849 off. */
3850 rs6000_altivec_abi = 1;
3851 TARGET_ALTIVEC_VRSAVE = 1;
3852 rs6000_current_abi = ABI_DARWIN;
3854 if (DEFAULT_ABI == ABI_DARWIN
3855 && TARGET_64BIT)
3856 darwin_one_byte_bool = 1;
3858 if (TARGET_64BIT && ! TARGET_POWERPC64)
3860 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3861 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3863 if (flag_mkernel)
3865 rs6000_default_long_calls = 1;
3866 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3869 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3870 AltiVec. */
3871 if (!flag_mkernel && !flag_apple_kext
3872 && TARGET_64BIT
3873 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3874 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3876 /* Unless the user (not the configurer) has explicitly overridden
3877 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3878 G4 unless targeting the kernel. */
3879 if (!flag_mkernel
3880 && !flag_apple_kext
3881 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3882 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3883 && ! global_options_set.x_rs6000_cpu_index)
3885 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3888 #endif
3890 /* If not otherwise specified by a target, make 'long double' equivalent to
3891 'double'. */
3893 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3894 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3895 #endif
3897 /* Return the builtin mask of the various options that could affect which
3898 builtins are available. In the past we used target_flags, but we've run out of
3899 bits, and some options like PAIRED are no longer in target_flags. */
3901 HOST_WIDE_INT
3902 rs6000_builtin_mask_calculate (void)
3904 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3905 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3906 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3907 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3908 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3909 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3910 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3911 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3912 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3913 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3914 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3915 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3916 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3917 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3918 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3919 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3920 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3921 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3922 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3923 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3924 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3925 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3926 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
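/* Illustrative use of the mask (a sketch, not code from this file):
   each builtin records the RS6000_BTM_* bits it needs, and it is only
   usable when all of those bits are present, e.g.

       HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
       bool have_p9 = (mask & RS6000_BTM_P9_VECTOR) != 0;

   Retargeting via attributes or pragmas recomputes the mask, so builtin
   availability follows the current target options.  */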
3929 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3930 to clobber the XER[CA] bit because clobbering that bit without telling
3931 the compiler worked just fine with versions of GCC before GCC 5, and
3932 breaking a lot of older code in ways that are hard to track down is
3933 not such a great idea. */
3935 static rtx_insn *
3936 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3937 vec<const char *> &/*constraints*/,
3938 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3940 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3941 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3942 return NULL;
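/* Consequence of the hook above (illustrative): an asm such as

       asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   modifies the carry bit without declaring it, and the implicit clobber
   of CA_REGNO added here keeps the compiler from assuming XER[CA]
   survives across the asm.  */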
3945 /* Override command line options.
3947 Combine build-specific configuration information with options
3948 specified on the command line to set various state variables which
3949 influence code generation, optimization, and expansion of built-in
3950 functions. Ensure that command-line configuration preferences are
3951 compatible with each other and with the build configuration; issue
3952 warnings while adjusting configuration or error messages while
3953 rejecting configuration.
3955 Upon entry to this function:
3957 This function is called once at the beginning of
3958 compilation, and then again at the start and end of compiling
3959 each section of code that has a different configuration, as
3960 indicated, for example, by adding the
3962 __attribute__((__target__("cpu=power9")))
3964 qualifier to a function definition or, for example, by bracketing
3965 code between
3967 #pragma GCC target("altivec")
3971 #pragma GCC reset_options
3973 directives. Parameter global_init_p is true for the initial
3974 invocation, which initializes global variables, and false for all
3975 subsequent invocations.
3978 Various global state information is assumed to be valid. This
3979 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3980 default CPU specified at build configure time, TARGET_DEFAULT,
3981 representing the default set of option flags for the default
3982 target, and global_options_set.x_rs6000_isa_flags, representing
3983 which options were requested on the command line.
3985 Upon return from this function:
3987 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3988 was set by name on the command line. Additionally, if certain
3989 attributes are automatically enabled or disabled by this function
3990 in order to assure compatibility between options and
3991 configuration, the flags associated with those attributes are
3992 also set. By setting these "explicit bits", we avoid the risk
3993 that other code might accidentally overwrite these particular
3994 attributes with "default values".
3996 The various bits of rs6000_isa_flags are set to indicate the
3997 target options that have been selected for the most current
3998 compilation efforts. This has the effect of also turning on the
3999 associated TARGET_XXX values since these are macros which are
4000 generally defined to test the corresponding bit of the
4001 rs6000_isa_flags variable.
4003 The variable rs6000_builtin_mask is set to represent the target
4004 options for the most current compilation efforts, consistent with
4005 the current contents of rs6000_isa_flags. This variable controls
4006 expansion of built-in functions.
4008 Various other global variables and fields of global structures
4009 (over 50 in all) are initialized to reflect the desired options
4010 for the most current compilation efforts. */
4012 static bool
4013 rs6000_option_override_internal (bool global_init_p)
4015 bool ret = true;
4017 HOST_WIDE_INT set_masks;
4018 HOST_WIDE_INT ignore_masks;
4019 int cpu_index = -1;
4020 int tune_index;
4021 struct cl_target_option *main_target_opt
4022 = ((global_init_p || target_option_default_node == NULL)
4023 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4025 /* Print defaults. */
4026 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4027 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4029 /* Remember the explicit arguments. */
4030 if (global_init_p)
4031 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4033 /* We plan to deprecate the -maltivec=be option. For now, just
4034 issue a warning message. */
4035 if (global_init_p
4036 && rs6000_altivec_element_order == 2)
4037 warning (0, "%qs command-line option is deprecated",
4038 "-maltivec=be");
4040 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4041 library functions, so warn about it. The flag may be useful for
4042 performance studies from time to time though, so don't disable it
4043 entirely. */
4044 if (global_options_set.x_rs6000_alignment_flags
4045 && rs6000_alignment_flags == MASK_ALIGN_POWER
4046 && DEFAULT_ABI == ABI_DARWIN
4047 && TARGET_64BIT)
4048 warning (0, "%qs is not supported for 64-bit Darwin;"
4049 " it is incompatible with the installed C and C++ libraries",
4050 "-malign-power");
4052 /* Numerous experiments show that IRA-based loop pressure
4053 calculation works better for RTL loop invariant motion on targets
4054 with enough (>= 32) registers. It is an expensive optimization,
4055 so it is enabled only when optimizing for peak performance. */
4056 if (optimize >= 3 && global_init_p
4057 && !global_options_set.x_flag_ira_loop_pressure)
4058 flag_ira_loop_pressure = 1;
4060 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4061 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4062 options were already specified. */
4063 if (flag_sanitize & SANITIZE_USER_ADDRESS
4064 && !global_options_set.x_flag_asynchronous_unwind_tables)
4065 flag_asynchronous_unwind_tables = 1;
4067 /* Set the pointer size. */
4068 if (TARGET_64BIT)
4070 rs6000_pmode = DImode;
4071 rs6000_pointer_size = 64;
4073 else
4075 rs6000_pmode = SImode;
4076 rs6000_pointer_size = 32;
4079 /* Some OSs don't support saving the high part of 64-bit registers on context
4080 switch. Other OSs don't support saving Altivec registers. On those OSs,
4081 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4082 if the user wants either, the user must explicitly specify them and we
4083 won't interfere with the user's specification. */
4085 set_masks = POWERPC_MASKS;
4086 #ifdef OS_MISSING_POWERPC64
4087 if (OS_MISSING_POWERPC64)
4088 set_masks &= ~OPTION_MASK_POWERPC64;
4089 #endif
4090 #ifdef OS_MISSING_ALTIVEC
4091 if (OS_MISSING_ALTIVEC)
4092 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4093 | OTHER_VSX_VECTOR_MASKS);
4094 #endif
4096 /* Don't let the processor default override flags given explicitly. */
4097 set_masks &= ~rs6000_isa_flags_explicit;
4099 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4100 the cpu in a target attribute or pragma, but did not specify a tuning
4101 option, use the cpu for the tuning option rather than the option specified
4102 with -mtune on the command line. Process a '--with-cpu' configuration
4103 request as an implicit --cpu. */
4104 if (rs6000_cpu_index >= 0)
4105 cpu_index = rs6000_cpu_index;
4106 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4107 cpu_index = main_target_opt->x_rs6000_cpu_index;
4108 else if (OPTION_TARGET_CPU_DEFAULT)
4109 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4111 if (cpu_index >= 0)
4113 const char *unavailable_cpu = NULL;
4114 switch (processor_target_table[cpu_index].processor)
4116 #ifndef HAVE_AS_POWER9
4117 case PROCESSOR_POWER9:
4118 unavailable_cpu = "power9";
4119 break;
4120 #endif
4121 #ifndef HAVE_AS_POWER8
4122 case PROCESSOR_POWER8:
4123 unavailable_cpu = "power8";
4124 break;
4125 #endif
4126 #ifndef HAVE_AS_POPCNTD
4127 case PROCESSOR_POWER7:
4128 unavailable_cpu = "power7";
4129 break;
4130 #endif
4131 #ifndef HAVE_AS_DFP
4132 case PROCESSOR_POWER6:
4133 unavailable_cpu = "power6";
4134 break;
4135 #endif
4136 #ifndef HAVE_AS_POPCNTB
4137 case PROCESSOR_POWER5:
4138 unavailable_cpu = "power5";
4139 break;
4140 #endif
4141 default:
4142 break;
4144 if (unavailable_cpu)
4146 cpu_index = -1;
4147 warning (0, "will not generate %qs instructions because "
4148 "assembler lacks %qs support", unavailable_cpu,
4149 unavailable_cpu);
4153 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4154 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4155 with those from the cpu, except for options that were explicitly set. If
4156 we don't have a cpu, do not override the target bits set in
4157 TARGET_DEFAULT. */
4158 if (cpu_index >= 0)
4160 rs6000_cpu_index = cpu_index;
4161 rs6000_isa_flags &= ~set_masks;
4162 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4163 & set_masks);
4165 else
4167 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4168 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4169 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4170 to using rs6000_isa_flags, we need to do the initialization here.
4172 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4173 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4174 HOST_WIDE_INT flags;
4175 if (TARGET_DEFAULT)
4176 flags = TARGET_DEFAULT;
4177 else
4179 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4180 const char *default_cpu = (!TARGET_POWERPC64
4181 ? "powerpc"
4182 : (BYTES_BIG_ENDIAN
4183 ? "powerpc64"
4184 : "powerpc64le"));
4185 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4186 flags = processor_target_table[default_cpu_index].target_enable;
4188 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4191 if (rs6000_tune_index >= 0)
4192 tune_index = rs6000_tune_index;
4193 else if (cpu_index >= 0)
4194 rs6000_tune_index = tune_index = cpu_index;
4195 else
4197 size_t i;
4198 enum processor_type tune_proc
4199 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4201 tune_index = -1;
4202 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4203 if (processor_target_table[i].processor == tune_proc)
4205 tune_index = i;
4206 break;
4210 if (cpu_index >= 0)
4211 rs6000_cpu = processor_target_table[cpu_index].processor;
4212 else
4213 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
4215 gcc_assert (tune_index >= 0);
4216 rs6000_tune = processor_target_table[tune_index].processor;
4218 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4219 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4220 || rs6000_cpu == PROCESSOR_PPCE5500)
4222 if (TARGET_ALTIVEC)
4223 error ("AltiVec not supported in this target");
4226 /* If we are optimizing big endian systems for space, use the load/store
4227 multiple instructions. */
4228 if (BYTES_BIG_ENDIAN && optimize_size)
4229 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
4231 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
4232 because the hardware doesn't support the instructions used in little
4233 endian mode, and causes an alignment trap. The 750 does not cause an
4234 alignment trap (except when the target is unaligned). */
4236 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
4238 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4239 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4240 warning (0, "%qs is not supported on little endian systems",
4241 "-mmultiple");
4244 /* If little-endian, default to -mstrict-align on older processors.
4245 Testing for htm matches power8 and later. */
4246 if (!BYTES_BIG_ENDIAN
4247 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4248 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4250 /* -maltivec={le,be} implies -maltivec. */
4251 if (rs6000_altivec_element_order != 0)
4252 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4254 /* Disallow -maltivec=le in big endian mode for now. This is not
4255 known to be useful for anyone. */
4256 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4258 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4259 rs6000_altivec_element_order = 0;
4262 if (!rs6000_fold_gimple)
4263 fprintf (stderr,
4264 "gimple folding of rs6000 builtins has been disabled.\n");
4266 /* Add some warnings for VSX. */
4267 if (TARGET_VSX)
4269 const char *msg = NULL;
4270 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4272 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4273 msg = N_("-mvsx requires hardware floating point");
4274 else
4276 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4277 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4280 else if (TARGET_PAIRED_FLOAT)
4281 msg = N_("-mvsx and -mpaired are incompatible");
4282 else if (TARGET_AVOID_XFORM > 0)
4283 msg = N_("-mvsx needs indexed addressing");
4284 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4285 & OPTION_MASK_ALTIVEC))
4287 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4288 msg = N_("-mvsx and -mno-altivec are incompatible");
4289 else
4290 msg = N_("-mno-altivec disables vsx");
4293 if (msg)
4295 warning (0, msg);
4296 rs6000_isa_flags &= ~OPTION_MASK_VSX;
4297 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4301 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4302 the -mcpu setting to enable options that conflict. */
4303 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4304 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4305 | OPTION_MASK_ALTIVEC
4306 | OPTION_MASK_VSX)) != 0)
4307 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4308 | OPTION_MASK_DIRECT_MOVE)
4309 & ~rs6000_isa_flags_explicit);
4311 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4312 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4314 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4315 off all of the options that depend on those flags. */
4316 ignore_masks = rs6000_disable_incompatible_switches ();
4318 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4319 unless the user explicitly used the -mno-<option> to disable the code. */
4320 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4321 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4322 else if (TARGET_P9_MINMAX)
4324 if (cpu_index >= 0)
4326 if (cpu_index == PROCESSOR_POWER9)
4328 /* legacy behavior: allow -mcpu=power9 with certain
4329 capabilities explicitly disabled. */
4330 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4332 else
4333 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4334 "for <xxx> less than power9", "-mcpu");
4336 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4337 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4338 & rs6000_isa_flags_explicit))
4339 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4340 were explicitly cleared. */
4341 error ("%qs incompatible with explicitly disabled options",
4342 "-mpower9-minmax");
4343 else
4344 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4346 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4347 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4348 else if (TARGET_VSX)
4349 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4350 else if (TARGET_POPCNTD)
4351 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4352 else if (TARGET_DFP)
4353 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4354 else if (TARGET_CMPB)
4355 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4356 else if (TARGET_FPRND)
4357 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4358 else if (TARGET_POPCNTB)
4359 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4360 else if (TARGET_ALTIVEC)
4361 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4363 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4365 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4366 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4367 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4370 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4372 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4373 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4374 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4377 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4379 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4380 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4381 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4384 if (TARGET_P8_VECTOR && !TARGET_VSX)
4386 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4387 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4388 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4389 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4391 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4392 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4393 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4395 else
4397 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4398 not explicit. */
4399 rs6000_isa_flags |= OPTION_MASK_VSX;
4400 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4404 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4406 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4407 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4408 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4411 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4412 silently turn off quad memory mode. */
4413 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4415 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4416 warning (0, N_("-mquad-memory requires 64-bit mode"));
4418 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4419 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4421 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4422 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4425 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4426 the words are reversed, but atomic operations can still be done by
4427 swapping the words. */
4428 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4430 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4431 warning (0, N_("-mquad-memory is not available in little endian "
4432 "mode"));
4434 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4437 /* Assume if the user asked for normal quad memory instructions, they want
4438 the atomic versions as well, unless they explicitly told us not to use quad
4439 word atomic instructions. */
4440 if (TARGET_QUAD_MEMORY
4441 && !TARGET_QUAD_MEMORY_ATOMIC
4442 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4443 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
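/* In other words, "-mquad-memory" alone behaves as if
   "-mquad-memory -mquad-memory-atomic" had been given, while an
   explicit "-mquad-memory -mno-quad-memory-atomic" is honored.  */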
4445 /* If we can shrink-wrap the TOC register save separately, then use
4446 -msave-toc-indirect unless explicitly disabled. */
4447 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4448 && flag_shrink_wrap_separate
4449 && optimize_function_for_speed_p (cfun))
4450 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4452 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4453 generating power8 instructions. */
4454 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4455 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4456 & OPTION_MASK_P8_FUSION);
4458 /* Setting additional fusion flags turns on base fusion. */
4459 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4461 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4463 if (TARGET_P8_FUSION_SIGN)
4464 error ("%qs requires %qs", "-mpower8-fusion-sign",
4465 "-mpower8-fusion");
4467 if (TARGET_TOC_FUSION)
4468 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4470 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4472 else
4473 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4476 /* Power9 fusion is a superset of power8 fusion. */
4477 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4479 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4481 /* We prefer not to mention undocumented options in
4482 error messages. However, if users have managed to select
4483 power9-fusion without selecting power8-fusion, they
4484 already know about undocumented flags. */
4485 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4486 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4488 else
4489 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4492 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4493 generating power9 instructions. */
4494 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4495 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4496 & OPTION_MASK_P9_FUSION);
4498 /* Power8 does not fuse sign-extended loads with the addis. If we are
4499 optimizing at high levels for speed, convert a sign-extended load into a
4500 zero-extending load and an explicit sign extension. */
4501 if (TARGET_P8_FUSION
4502 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4503 && optimize_function_for_speed_p (cfun)
4504 && optimize >= 3)
4505 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4507 /* TOC fusion requires 64-bit and medium/large code model. */
4508 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4510 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4511 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4512 warning (0, N_("-mtoc-fusion requires 64-bit"));
4515 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4517 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4518 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4519 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4522 /* Turn on -mtoc-fusion by default if we have p8-fusion and a 64-bit
4523 medium/large code model. */
4524 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4525 && (TARGET_CMODEL != CMODEL_SMALL)
4526 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4527 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4529 /* ISA 3.0 vector instructions include ISA 2.07. */
4530 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4532 /* We prefer not to mention undocumented options in
4533 error messages. However, if users have managed to select
4534 power9-vector without selecting power8-vector, they
4535 already know about undocumented flags. */
4536 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4537 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4538 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4539 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4541 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4542 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4543 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4545 else
4547 /* OPTION_MASK_P9_VECTOR is explicit and
4548 OPTION_MASK_P8_VECTOR is not explicit. */
4549 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4550 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4554 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4555 support. If we only have ISA 2.06 support, and the user did not specify
4556 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4557 but we don't enable the full vectorization support. */
4558 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4559 TARGET_ALLOW_MOVMISALIGN = 1;
4561 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4563 if (TARGET_ALLOW_MOVMISALIGN > 0
4564 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4565 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4567 TARGET_ALLOW_MOVMISALIGN = 0;
4570 /* Determine when unaligned vector accesses are permitted, and when
4571 they are preferred over masked Altivec loads. Note that if
4572 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4573 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4574 not true. */
4575 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4577 if (!TARGET_VSX)
4579 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4580 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4582 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4585 else if (!TARGET_ALLOW_MOVMISALIGN)
4587 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4588 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4589 "-mallow-movmisalign");
4591 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4595 /* Set long double size before the IEEE 128-bit tests. */
4596 if (!global_options_set.x_rs6000_long_double_type_size)
4598 if (main_target_opt != NULL
4599 && (main_target_opt->x_rs6000_long_double_type_size
4600 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4601 error ("target attribute or pragma changes long double size");
4602 else
4603 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4606 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4607 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4608 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4609 those systems will not pick up this default. Warn if the user changes the
4610 default unless either the user used the -Wno-psabi option, or the compiler
4611 was built to enable multilibs to switch between the two long double
4612 types. */
4613 if (!global_options_set.x_rs6000_ieeequad)
4614 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4616 else if (!TARGET_IEEEQUAD_MULTILIB
4617 && rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT
4618 && TARGET_LONG_DOUBLE_128)
4620 static bool warned_change_long_double;
4621 if (!warned_change_long_double)
4623 warned_change_long_double = true;
4624 if (TARGET_IEEEQUAD)
4625 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4626 else
4627 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4631 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4632 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4633 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4634 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4635 the keyword as well as the type. */
4636 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4638 /* IEEE 128-bit floating point requires VSX support. */
4639 if (TARGET_FLOAT128_KEYWORD)
4641 if (!TARGET_VSX)
4643 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4644 error ("%qs requires VSX support", "-mfloat128");
4646 TARGET_FLOAT128_TYPE = 0;
4647 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4648 | OPTION_MASK_FLOAT128_HW);
4650 else if (!TARGET_FLOAT128_TYPE)
4652 TARGET_FLOAT128_TYPE = 1;
4653 warning (0, "The -mfloat128 option may not be fully supported");
4657 /* Enable the __float128 keyword under Linux by default. */
4658 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4659 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4660 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4662 /* If we are supporting the float128 type and have full ISA 3.0 support,
4663 enable -mfloat128-hardware by default. However, don't enable the
4664 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4665 because sometimes the compiler wants to put things in an integer
4666 container, and if we don't have __int128 support, it is impossible. */
4667 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4668 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4669 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4670 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4672 if (TARGET_FLOAT128_HW
4673 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4675 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4676 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4678 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4681 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4683 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4684 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4686 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4689 /* Print the options after updating the defaults. */
4690 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4691 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4693 /* E500mc does "better" if we inline more aggressively. Respect the
4694 user's opinion, though. */
4695 if (rs6000_block_move_inline_limit == 0
4696 && (rs6000_tune == PROCESSOR_PPCE500MC
4697 || rs6000_tune == PROCESSOR_PPCE500MC64
4698 || rs6000_tune == PROCESSOR_PPCE5500
4699 || rs6000_tune == PROCESSOR_PPCE6500))
4700 rs6000_block_move_inline_limit = 128;
4702 /* store_one_arg depends on expand_block_move to handle at least the
4703 size of reg_parm_stack_space. */
4704 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4705 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4707 if (global_init_p)
4709 /* If the appropriate debug option is enabled, replace the target hooks
4710 with debug versions that call the real version and then print
4711 debugging information. */
4712 if (TARGET_DEBUG_COST)
4714 targetm.rtx_costs = rs6000_debug_rtx_costs;
4715 targetm.address_cost = rs6000_debug_address_cost;
4716 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4719 if (TARGET_DEBUG_ADDR)
4721 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4722 targetm.legitimize_address = rs6000_debug_legitimize_address;
4723 rs6000_secondary_reload_class_ptr
4724 = rs6000_debug_secondary_reload_class;
4725 targetm.secondary_memory_needed
4726 = rs6000_debug_secondary_memory_needed;
4727 targetm.can_change_mode_class
4728 = rs6000_debug_can_change_mode_class;
4729 rs6000_preferred_reload_class_ptr
4730 = rs6000_debug_preferred_reload_class;
4731 rs6000_legitimize_reload_address_ptr
4732 = rs6000_debug_legitimize_reload_address;
4733 rs6000_mode_dependent_address_ptr
4734 = rs6000_debug_mode_dependent_address;
4737 if (rs6000_veclibabi_name)
4739 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4740 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4741 else
4743 error ("unknown vectorization library ABI type (%qs) for "
4744 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4745 ret = false;
4750 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4751 target attribute or pragma which automatically enables both options,
4752 unless the altivec ABI was set. This is set by default for 64-bit, but
4753 not for 32-bit. */
4754 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4756 TARGET_FLOAT128_TYPE = 0;
4757 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4758 | OPTION_MASK_FLOAT128_KEYWORD)
4759 & ~rs6000_isa_flags_explicit);
4762 /* Enable Altivec ABI for AIX -maltivec. */
4763 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4765 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4766 error ("target attribute or pragma changes AltiVec ABI");
4767 else
4768 rs6000_altivec_abi = 1;
4771 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4772 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4773 be explicitly overridden in either case. */
4774 if (TARGET_ELF)
4776 if (!global_options_set.x_rs6000_altivec_abi
4777 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4779 if (main_target_opt != NULL &&
4780 !main_target_opt->x_rs6000_altivec_abi)
4781 error ("target attribute or pragma changes AltiVec ABI");
4782 else
4783 rs6000_altivec_abi = 1;
4787 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4788 So far, the only darwin64 targets are also MACH-O. */
4789 if (TARGET_MACHO
4790 && DEFAULT_ABI == ABI_DARWIN
4791 && TARGET_64BIT)
4793 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4794 error ("target attribute or pragma changes darwin64 ABI");
4795 else
4797 rs6000_darwin64_abi = 1;
4798 /* Default to natural alignment, for better performance. */
4799 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4803 /* Place FP constants in the constant pool instead of TOC
4804 if section anchors are enabled. */
4805 if (flag_section_anchors
4806 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4807 TARGET_NO_FP_IN_TOC = 1;
4809 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4810 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4812 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4813 SUBTARGET_OVERRIDE_OPTIONS;
4814 #endif
4815 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4816 SUBSUBTARGET_OVERRIDE_OPTIONS;
4817 #endif
4818 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4819 SUB3TARGET_OVERRIDE_OPTIONS;
4820 #endif
4822 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4823 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4825 if (main_target_opt)
4827 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4828 error ("target attribute or pragma changes single precision floating "
4829 "point");
4830 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4831 error ("target attribute or pragma changes double precision floating "
4832 "point");
4835 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4836 && rs6000_tune != PROCESSOR_POWER5
4837 && rs6000_tune != PROCESSOR_POWER6
4838 && rs6000_tune != PROCESSOR_POWER7
4839 && rs6000_tune != PROCESSOR_POWER8
4840 && rs6000_tune != PROCESSOR_POWER9
4841 && rs6000_tune != PROCESSOR_PPCA2
4842 && rs6000_tune != PROCESSOR_CELL
4843 && rs6000_tune != PROCESSOR_PPC476);
4844 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4845 || rs6000_tune == PROCESSOR_POWER5
4846 || rs6000_tune == PROCESSOR_POWER7
4847 || rs6000_tune == PROCESSOR_POWER8);
4848 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4849 || rs6000_tune == PROCESSOR_POWER5
4850 || rs6000_tune == PROCESSOR_POWER6
4851 || rs6000_tune == PROCESSOR_POWER7
4852 || rs6000_tune == PROCESSOR_POWER8
4853 || rs6000_tune == PROCESSOR_POWER9
4854 || rs6000_tune == PROCESSOR_PPCE500MC
4855 || rs6000_tune == PROCESSOR_PPCE500MC64
4856 || rs6000_tune == PROCESSOR_PPCE5500
4857 || rs6000_tune == PROCESSOR_PPCE6500);
4859 /* Allow debug switches to override the above settings. These are set to -1
4860 in rs6000.opt to indicate the user hasn't directly set the switch. */
4861 if (TARGET_ALWAYS_HINT >= 0)
4862 rs6000_always_hint = TARGET_ALWAYS_HINT;
4864 if (TARGET_SCHED_GROUPS >= 0)
4865 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4867 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4868 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4870 rs6000_sched_restricted_insns_priority
4871 = (rs6000_sched_groups ? 1 : 0);
4873 /* Handle -msched-costly-dep option. */
4874 rs6000_sched_costly_dep
4875 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4877 if (rs6000_sched_costly_dep_str)
4879 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4880 rs6000_sched_costly_dep = no_dep_costly;
4881 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4882 rs6000_sched_costly_dep = all_deps_costly;
4883 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4884 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4885 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4886 rs6000_sched_costly_dep = store_to_load_dep_costly;
4887 else
4888 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4889 atoi (rs6000_sched_costly_dep_str));
4892 /* Handle -minsert-sched-nops option. */
4893 rs6000_sched_insert_nops
4894 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4896 if (rs6000_sched_insert_nops_str)
4898 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4899 rs6000_sched_insert_nops = sched_finish_none;
4900 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4901 rs6000_sched_insert_nops = sched_finish_pad_groups;
4902 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4903 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4904 else
4905 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4906 atoi (rs6000_sched_insert_nops_str));
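/* Example spellings accepted by the two option parsers above (any
   other string falls through to atoi and is treated as a number):

       -msched-costly-dep=no|all|true_store_to_load|store_to_load|<n>
       -minsert-sched-nops=no|pad|regroup_exact|<n>  */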
4909 /* Handle the stack protector options. */
4910 if (!global_options_set.x_rs6000_stack_protector_guard)
4911 #ifdef TARGET_THREAD_SSP_OFFSET
4912 rs6000_stack_protector_guard = SSP_TLS;
4913 #else
4914 rs6000_stack_protector_guard = SSP_GLOBAL;
4915 #endif
4917 #ifdef TARGET_THREAD_SSP_OFFSET
4918 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4919 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4920 #endif
4922 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4924 char *endp;
4925 const char *str = rs6000_stack_protector_guard_offset_str;
4927 errno = 0;
4928 long offset = strtol (str, &endp, 0);
4929 if (!*str || *endp || errno)
4930 error ("%qs is not a valid number in %qs", str,
4931 "-mstack-protector-guard-offset=");
4933 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4934 || (TARGET_64BIT && (offset & 3)))
4935 error ("%qs is not a valid offset in %qs", str,
4936 "-mstack-protector-guard-offset=");
4938 rs6000_stack_protector_guard_offset = offset;
4941 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4943 const char *str = rs6000_stack_protector_guard_reg_str;
4944 int reg = decode_reg_name (str);
4946 if (!IN_RANGE (reg, 1, 31))
4947 error ("%qs is not a valid base register in %qs", str,
4948 "-mstack-protector-guard-reg=");
4950 rs6000_stack_protector_guard_reg = reg;
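/* Putting the guard options together (illustrative values only):

       gcc -mstack-protector-guard=tls \
           -mstack-protector-guard-reg=r13 \
           -mstack-protector-guard-offset=0x7010 ...

   would read the canary from 0x7010(r13).  Per the checks above, the
   offset must fit in a signed 16-bit displacement and, for -m64, have
   its low two bits clear.  */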
4953 if (rs6000_stack_protector_guard == SSP_TLS
4954 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4955 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4957 if (global_init_p)
4959 #ifdef TARGET_REGNAMES
4960 /* If the user desires alternate register names, copy in the
4961 alternate names now. */
4962 if (TARGET_REGNAMES)
4963 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4964 #endif
4966 /* Set aix_struct_return last, after the ABI is determined.
4967 If -maix-struct-return or -msvr4-struct-return was explicitly
4968 used, don't override with the ABI default. */
4969 if (!global_options_set.x_aix_struct_return)
4970 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4972 #if 0
4973 /* IBM XL compiler defaults to unsigned bitfields. */
4974 if (TARGET_XL_COMPAT)
4975 flag_signed_bitfields = 0;
4976 #endif
4978 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4979 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4981 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4983 /* We can only guarantee the availability of DI pseudo-ops when
4984 assembling for 64-bit targets. */
4985 if (!TARGET_64BIT)
4987 targetm.asm_out.aligned_op.di = NULL;
4988 targetm.asm_out.unaligned_op.di = NULL;
4992 /* Set branch target alignment, if not optimizing for size. */
4993 if (!optimize_size)
4995 /* Cell wants to be 8-byte aligned for dual issue.  Titan wants to be
4996 8-byte aligned to avoid misprediction by the branch predictor. */
4997 if (rs6000_tune == PROCESSOR_TITAN
4998 || rs6000_tune == PROCESSOR_CELL)
5000 if (align_functions <= 0)
5001 align_functions = 8;
5002 if (align_jumps <= 0)
5003 align_jumps = 8;
5004 if (align_loops <= 0)
5005 align_loops = 8;
5007 if (rs6000_align_branch_targets)
5009 if (align_functions <= 0)
5010 align_functions = 16;
5011 if (align_jumps <= 0)
5012 align_jumps = 16;
5013 if (align_loops <= 0)
5015 can_override_loop_align = 1;
5016 align_loops = 16;
5019 if (align_jumps_max_skip <= 0)
5020 align_jumps_max_skip = 15;
5021 if (align_loops_max_skip <= 0)
5022 align_loops_max_skip = 15;
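/* Illustrative note, not in the original source: on CPUs where
   rs6000_align_branch_targets is set, the defaults above give 16-byte
   function/jump/loop alignment with at most 15 bytes of padding,
   while Cell and Titan default to 8-byte alignment instead.  */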
5025 /* Arrange to save and restore machine status around nested functions. */
5026 init_machine_status = rs6000_init_machine_status;
5028 /* We should always be splitting complex arguments, but we can't break
5029 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5030 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5031 targetm.calls.split_complex_arg = NULL;
5033 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5034 if (DEFAULT_ABI == ABI_AIX)
5035 targetm.calls.custom_function_descriptors = 0;
5038 /* Initialize rs6000_cost with the appropriate target costs. */
5039 if (optimize_size)
5040 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5041 else
5042 switch (rs6000_tune)
5044 case PROCESSOR_RS64A:
5045 rs6000_cost = &rs64a_cost;
5046 break;
5048 case PROCESSOR_MPCCORE:
5049 rs6000_cost = &mpccore_cost;
5050 break;
5052 case PROCESSOR_PPC403:
5053 rs6000_cost = &ppc403_cost;
5054 break;
5056 case PROCESSOR_PPC405:
5057 rs6000_cost = &ppc405_cost;
5058 break;
5060 case PROCESSOR_PPC440:
5061 rs6000_cost = &ppc440_cost;
5062 break;
5064 case PROCESSOR_PPC476:
5065 rs6000_cost = &ppc476_cost;
5066 break;
5068 case PROCESSOR_PPC601:
5069 rs6000_cost = &ppc601_cost;
5070 break;
5072 case PROCESSOR_PPC603:
5073 rs6000_cost = &ppc603_cost;
5074 break;
5076 case PROCESSOR_PPC604:
5077 rs6000_cost = &ppc604_cost;
5078 break;
5080 case PROCESSOR_PPC604e:
5081 rs6000_cost = &ppc604e_cost;
5082 break;
5084 case PROCESSOR_PPC620:
5085 rs6000_cost = &ppc620_cost;
5086 break;
5088 case PROCESSOR_PPC630:
5089 rs6000_cost = &ppc630_cost;
5090 break;
5092 case PROCESSOR_CELL:
5093 rs6000_cost = &ppccell_cost;
5094 break;
5096 case PROCESSOR_PPC750:
5097 case PROCESSOR_PPC7400:
5098 rs6000_cost = &ppc750_cost;
5099 break;
5101 case PROCESSOR_PPC7450:
5102 rs6000_cost = &ppc7450_cost;
5103 break;
5105 case PROCESSOR_PPC8540:
5106 case PROCESSOR_PPC8548:
5107 rs6000_cost = &ppc8540_cost;
5108 break;
5110 case PROCESSOR_PPCE300C2:
5111 case PROCESSOR_PPCE300C3:
5112 rs6000_cost = &ppce300c2c3_cost;
5113 break;
5115 case PROCESSOR_PPCE500MC:
5116 rs6000_cost = &ppce500mc_cost;
5117 break;
5119 case PROCESSOR_PPCE500MC64:
5120 rs6000_cost = &ppce500mc64_cost;
5121 break;
5123 case PROCESSOR_PPCE5500:
5124 rs6000_cost = &ppce5500_cost;
5125 break;
5127 case PROCESSOR_PPCE6500:
5128 rs6000_cost = &ppce6500_cost;
5129 break;
5131 case PROCESSOR_TITAN:
5132 rs6000_cost = &titan_cost;
5133 break;
5135 case PROCESSOR_POWER4:
5136 case PROCESSOR_POWER5:
5137 rs6000_cost = &power4_cost;
5138 break;
5140 case PROCESSOR_POWER6:
5141 rs6000_cost = &power6_cost;
5142 break;
5144 case PROCESSOR_POWER7:
5145 rs6000_cost = &power7_cost;
5146 break;
5148 case PROCESSOR_POWER8:
5149 rs6000_cost = &power8_cost;
5150 break;
5152 case PROCESSOR_POWER9:
5153 rs6000_cost = &power9_cost;
5154 break;
5156 case PROCESSOR_PPCA2:
5157 rs6000_cost = &ppca2_cost;
5158 break;
5160 default:
5161 gcc_unreachable ();
5164 if (global_init_p)
5166 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5167 rs6000_cost->simultaneous_prefetches,
5168 global_options.x_param_values,
5169 global_options_set.x_param_values);
5170 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5171 global_options.x_param_values,
5172 global_options_set.x_param_values);
5173 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5174 rs6000_cost->cache_line_size,
5175 global_options.x_param_values,
5176 global_options_set.x_param_values);
5177 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5178 global_options.x_param_values,
5179 global_options_set.x_param_values);
5181 /* Increase loop peeling limits based on performance analysis. */
5182 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5183 global_options.x_param_values,
5184 global_options_set.x_param_values);
5185 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5186 global_options.x_param_values,
5187 global_options_set.x_param_values);
5189 /* Use the 'model' -fsched-pressure algorithm by default. */
5190 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5191 SCHED_PRESSURE_MODEL,
5192 global_options.x_param_values,
5193 global_options_set.x_param_values);
5195 /* If using typedef char *va_list, signal that
5196 __builtin_va_start (&ap, 0) can be optimized to
5197 ap = __builtin_next_arg (0). */
5198 if (DEFAULT_ABI != ABI_V4)
5199 targetm.expand_builtin_va_start = NULL;
5202 /* Set up single/double float flags.
5203 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5204 then set both flags. */
5205 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5206 rs6000_single_float = rs6000_double_float = 1;
5208 /* If not explicitly specified via option, decide whether to generate indexed
5209 load/store instructions. A value of -1 indicates that the
5210 initial value of this variable has not been overwritten. During
5211 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5212 if (TARGET_AVOID_XFORM == -1)
5213 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5214 DERAT mispredict penalty.  However, the LVE and STVE Altivec instructions
5215 need indexed accesses and the type used is the scalar type of the element
5216 being loaded or stored. */
5217 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
5218 && !TARGET_ALTIVEC);
5220 /* Set the -mrecip options. */
5221 if (rs6000_recip_name)
5223 char *p = ASTRDUP (rs6000_recip_name);
5224 char *q;
5225 unsigned int mask, i;
5226 bool invert;
5228 while ((q = strtok (p, ",")) != NULL)
5230 p = NULL;
5231 if (*q == '!')
5233 invert = true;
5234 q++;
5236 else
5237 invert = false;
5239 if (!strcmp (q, "default"))
5240 mask = ((TARGET_RECIP_PRECISION)
5241 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5242 else
5244 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5245 if (!strcmp (q, recip_options[i].string))
5247 mask = recip_options[i].mask;
5248 break;
5251 if (i == ARRAY_SIZE (recip_options))
5253 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5254 invert = false;
5255 mask = 0;
5256 ret = false;
5260 if (invert)
5261 rs6000_recip_control &= ~mask;
5262 else
5263 rs6000_recip_control |= mask;
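/* Illustrative note, not in the original source: tokens are processed
   left to right, so a string such as "default,!<name>" first sets the
   default mask and the '!'-prefixed token then clears the bits of the
   named recip_options entry again.  */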
5267 /* Set the builtin mask of the various options used that could affect which
5268 builtins were used. In the past we used target_flags, but we've run out
5269 of bits, and some options like PAIRED are no longer in target_flags. */
5270 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5271 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5272 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5273 rs6000_builtin_mask);
5275 /* Initialize all of the registers. */
5276 rs6000_init_hard_regno_mode_ok (global_init_p);
5278 /* Save the initial options in case the user uses function-specific options. */
5279 if (global_init_p)
5280 target_option_default_node = target_option_current_node
5281 = build_target_option_node (&global_options);
5283 /* If not explicitly specified via option, decide whether to generate the
5284 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5285 if (TARGET_LINK_STACK == -1)
5286 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
5288 /* Deprecate use of -mno-speculate-indirect-jumps. */
5289 if (!rs6000_speculate_indirect_jumps)
5290 warning (0, "%qs is deprecated and not recommended in any circumstances",
5291 "-mno-speculate-indirect-jumps");
5293 return ret;
5296 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5297 define the target cpu type. */
5299 static void
5300 rs6000_option_override (void)
5302 (void) rs6000_option_override_internal (true);
5306 /* Implement targetm.vectorize.builtin_mask_for_load. */
5307 static tree
5308 rs6000_builtin_mask_for_load (void)
5310 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5311 if ((TARGET_ALTIVEC && !TARGET_VSX)
5312 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5313 return altivec_builtin_mask_for_load;
5314 else
5315 return 0;
5318 /* Implement LOOP_ALIGN. */
5319 int
5320 rs6000_loop_align (rtx label)
5322 basic_block bb;
5323 int ninsns;
5325 /* Don't override loop alignment if -falign-loops was specified. */
5326 if (!can_override_loop_align)
5327 return align_loops_log;
5329 bb = BLOCK_FOR_INSN (label);
5330 ninsns = num_loop_insns (bb->loop_father);
5332 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5333 if (ninsns > 4 && ninsns <= 8
5334 && (rs6000_tune == PROCESSOR_POWER4
5335 || rs6000_tune == PROCESSOR_POWER5
5336 || rs6000_tune == PROCESSOR_POWER6
5337 || rs6000_tune == PROCESSOR_POWER7
5338 || rs6000_tune == PROCESSOR_POWER8))
5339 return 5;
5340 else
5341 return align_loops_log;
5344 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5345 static int
5346 rs6000_loop_align_max_skip (rtx_insn *label)
5348 return (1 << rs6000_loop_align (label)) - 1;
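/* Hedged worked example, not part of GCC: how the two hooks above
   pair up.  A rs6000_loop_align result of 5 means a 2**5 == 32-byte
   boundary, for which the max-skip hook then allows up to 31 bytes
   of padding.  */
#if 0
static void
example_loop_align_pairing (void)
{
  int log = 5;                    /* rs6000_loop_align for a small loop.  */
  int boundary = 1 << log;        /* 32-byte icache sector.  */
  int max_skip = (1 << log) - 1;  /* 31 bytes of padding allowed.  */
  gcc_assert (boundary == 32 && max_skip == 31);
}
#endif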
5351 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5352 after applying N iterations.  This routine does not determine
5353 how many iterations are required to reach the desired alignment. */
5355 static bool
5356 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5358 if (is_packed)
5359 return false;
5361 if (TARGET_32BIT)
5363 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5364 return true;
5366 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5367 return true;
5369 return false;
5371 else
5373 if (TARGET_MACHO)
5374 return false;
5376 /* Assume that all other types are naturally aligned.  CHECKME! */
5377 return true;
5381 /* Return true if the vector misalignment factor is supported by the
5382 target. */
5383 static bool
5384 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5385 const_tree type,
5386 int misalignment,
5387 bool is_packed)
5389 if (TARGET_VSX)
5391 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5392 return true;
5394 /* Return false if the movmisalign pattern is not supported for this mode. */
5395 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5396 return false;
5398 if (misalignment == -1)
5400 /* Misalignment factor is unknown at compile time but we know
5401 it's word aligned. */
5402 if (rs6000_vector_alignment_reachable (type, is_packed))
5404 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5406 if (element_size == 64 || element_size == 32)
5407 return true;
5410 return false;
5413 /* VSX supports word-aligned vectors. */
5414 if (misalignment % 4 == 0)
5415 return true;
5417 return false;
5420 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5421 static int
5422 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5423 tree vectype, int misalign)
5425 unsigned elements;
5426 tree elem_type;
5428 switch (type_of_cost)
5430 case scalar_stmt:
5431 case scalar_load:
5432 case scalar_store:
5433 case vector_stmt:
5434 case vector_load:
5435 case vector_store:
5436 case vec_to_scalar:
5437 case scalar_to_vec:
5438 case cond_branch_not_taken:
5439 return 1;
5441 case vec_perm:
5442 if (TARGET_VSX)
5443 return 3;
5444 else
5445 return 1;
5447 case vec_promote_demote:
5448 if (TARGET_VSX)
5449 return 4;
5450 else
5451 return 1;
5453 case cond_branch_taken:
5454 return 3;
5456 case unaligned_load:
5457 case vector_gather_load:
5458 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5459 return 1;
5461 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5463 elements = TYPE_VECTOR_SUBPARTS (vectype);
5464 if (elements == 2)
5465 /* Double word aligned. */
5466 return 2;
5468 if (elements == 4)
5470 switch (misalign)
5472 case 8:
5473 /* Double word aligned. */
5474 return 2;
5476 case -1:
5477 /* Unknown misalignment. */
5478 case 4:
5479 case 12:
5480 /* Word aligned. */
5481 return 22;
5483 default:
5484 gcc_unreachable ();
5489 if (TARGET_ALTIVEC)
5490 /* Misaligned loads are not supported. */
5491 gcc_unreachable ();
5493 return 2;
5495 case unaligned_store:
5496 case vector_scatter_store:
5497 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5498 return 1;
5500 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5502 elements = TYPE_VECTOR_SUBPARTS (vectype);
5503 if (elements == 2)
5504 /* Double word aligned. */
5505 return 2;
5507 if (elements == 4)
5509 switch (misalign)
5511 case 8:
5512 /* Double word aligned. */
5513 return 2;
5515 case -1:
5516 /* Unknown misalignment. */
5517 case 4:
5518 case 12:
5519 /* Word aligned. */
5520 return 23;
5522 default:
5523 gcc_unreachable ();
5528 if (TARGET_ALTIVEC)
5529 /* Misaligned stores are not supported. */
5530 gcc_unreachable ();
5532 return 2;
5534 case vec_construct:
5535 /* This is a rough approximation assuming non-constant elements
5536 constructed into a vector via element insertion. FIXME:
5537 vec_construct is not granular enough for uniformly good
5538 decisions. If the initialization is a splat, this is
5539 cheaper than we estimate. Improve this someday. */
5540 elem_type = TREE_TYPE (vectype);
5541 /* 32-bit vectors loaded into registers are stored as double
5542 precision, so we need 2 permutes, 2 converts, and 1 merge
5543 to construct a vector of short floats from them. */
5544 if (SCALAR_FLOAT_TYPE_P (elem_type)
5545 && TYPE_PRECISION (elem_type) == 32)
5546 return 5;
5547 /* On POWER9, integer vector types are built up in GPRs and then
5548 use a direct move (2 cycles). For POWER8 this is even worse,
5549 as we need two direct moves and a merge, and the direct moves
5550 are five cycles. */
5551 else if (INTEGRAL_TYPE_P (elem_type))
5553 if (TARGET_P9_VECTOR)
5554 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5555 else
5556 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5558 else
5559 /* V2DFmode doesn't need a direct move. */
5560 return 2;
5562 default:
5563 gcc_unreachable ();
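/* Hedged worked example, not in the original source: on a VSX target
   without TARGET_EFFICIENT_UNALIGNED_VSX, an unaligned_load of a
   4-element vector costs 2 when misalign == 8 (double-word aligned)
   but 22 when misalign is 4, 12 or unknown (-1); the matching
   unaligned_store costs are 2 and 23.  */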
5567 /* Implement targetm.vectorize.preferred_simd_mode. */
5569 static machine_mode
5570 rs6000_preferred_simd_mode (scalar_mode mode)
5572 if (TARGET_VSX)
5573 switch (mode)
5575 case E_DFmode:
5576 return V2DFmode;
5577 default:;
5579 if (TARGET_ALTIVEC || TARGET_VSX)
5580 switch (mode)
5582 case E_SFmode:
5583 return V4SFmode;
5584 case E_TImode:
5585 return V1TImode;
5586 case E_DImode:
5587 return V2DImode;
5588 case E_SImode:
5589 return V4SImode;
5590 case E_HImode:
5591 return V8HImode;
5592 case E_QImode:
5593 return V16QImode;
5594 default:;
5596 if (TARGET_PAIRED_FLOAT
5597 && mode == SFmode)
5598 return V2SFmode;
5599 return word_mode;
5602 typedef struct _rs6000_cost_data
5604 struct loop *loop_info;
5605 unsigned cost[3];
5606 } rs6000_cost_data;
5608 /* Test for likely overcommitment of vector hardware resources. If a
5609 loop iteration is relatively large, and too large a percentage of
5610 instructions in the loop are vectorized, the cost model may not
5611 adequately reflect delays from unavailable vector resources.
5612 Penalize the loop body cost for this case. */
5614 static void
5615 rs6000_density_test (rs6000_cost_data *data)
5617 const int DENSITY_PCT_THRESHOLD = 85;
5618 const int DENSITY_SIZE_THRESHOLD = 70;
5619 const int DENSITY_PENALTY = 10;
5620 struct loop *loop = data->loop_info;
5621 basic_block *bbs = get_loop_body (loop);
5622 int nbbs = loop->num_nodes;
5623 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5624 int i, density_pct;
5626 for (i = 0; i < nbbs; i++)
5628 basic_block bb = bbs[i];
5629 gimple_stmt_iterator gsi;
5631 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5633 gimple *stmt = gsi_stmt (gsi);
5634 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5636 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5637 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5638 not_vec_cost++;
5642 free (bbs);
5643 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5645 if (density_pct > DENSITY_PCT_THRESHOLD
5646 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5648 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5649 if (dump_enabled_p ())
5650 dump_printf_loc (MSG_NOTE, vect_location,
5651 "density %d%%, cost %d exceeds threshold, penalizing "
5652 "loop body cost by %d%%", density_pct,
5653 vec_cost + not_vec_cost, DENSITY_PENALTY);
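/* Hedged worked example, not in the original source: with
   vec_cost == 90 and not_vec_cost == 10, density_pct is 90 and the
   loop size is 100, so both thresholds are exceeded and the body
   cost is scaled by (100 + DENSITY_PENALTY) / 100, i.e. 90 -> 99.  */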
5657 /* Implement targetm.vectorize.init_cost. */
5659 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5660 instruction is needed by the vectorization. */
5661 static bool rs6000_vect_nonmem;
5663 static void *
5664 rs6000_init_cost (struct loop *loop_info)
5666 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5667 data->loop_info = loop_info;
5668 data->cost[vect_prologue] = 0;
5669 data->cost[vect_body] = 0;
5670 data->cost[vect_epilogue] = 0;
5671 rs6000_vect_nonmem = false;
5672 return data;
5675 /* Implement targetm.vectorize.add_stmt_cost. */
5677 static unsigned
5678 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5679 struct _stmt_vec_info *stmt_info, int misalign,
5680 enum vect_cost_model_location where)
5682 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5683 unsigned retval = 0;
5685 if (flag_vect_cost_model)
5687 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5688 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5689 misalign);
5690 /* Statements in an inner loop relative to the loop being
5691 vectorized are weighted more heavily. The value here is
5692 arbitrary and could potentially be improved with analysis. */
5693 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5694 count *= 50; /* FIXME. */
5696 retval = (unsigned) (count * stmt_cost);
5697 cost_data->cost[where] += retval;
5699 /* Check whether we're doing something other than just a copy loop.
5700 Not all such loops may be profitably vectorized; see
5701 rs6000_finish_cost. */
5702 if ((kind == vec_to_scalar || kind == vec_perm
5703 || kind == vec_promote_demote || kind == vec_construct
5704 || kind == scalar_to_vec)
5705 || (where == vect_body && kind == vector_stmt))
5706 rs6000_vect_nonmem = true;
5709 return retval;
5712 /* Implement targetm.vectorize.finish_cost. */
5714 static void
5715 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5716 unsigned *body_cost, unsigned *epilogue_cost)
5718 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5720 if (cost_data->loop_info)
5721 rs6000_density_test (cost_data);
5723 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5724 that require versioning for any reason. The vectorization is at
5725 best a wash inside the loop, and the versioning checks make
5726 profitability highly unlikely and potentially quite harmful. */
5727 if (cost_data->loop_info)
5729 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5730 if (!rs6000_vect_nonmem
5731 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5732 && LOOP_REQUIRES_VERSIONING (vec_info))
5733 cost_data->cost[vect_body] += 10000;
5736 *prologue_cost = cost_data->cost[vect_prologue];
5737 *body_cost = cost_data->cost[vect_body];
5738 *epilogue_cost = cost_data->cost[vect_epilogue];
5741 /* Implement targetm.vectorize.destroy_cost_data. */
5743 static void
5744 rs6000_destroy_cost_data (void *data)
5746 free (data);
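/* Hedged sketch, not part of GCC: the vectorizer drives the four
   hooks above in init / add_stmt (repeatedly) / finish / destroy
   order, roughly as follows.  */
#if 0
static void
example_cost_hook_sequence (struct loop *loop,
                            struct _stmt_vec_info *stmt_info)
{
  unsigned prologue_cost, body_cost, epilogue_cost;
  void *data = rs6000_init_cost (loop);
  rs6000_add_stmt_cost (data, 1, vector_load, stmt_info, 0, vect_body);
  rs6000_finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
  rs6000_destroy_cost_data (data);
}
#endif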
5749 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5750 library with vectorized intrinsics. */
5752 static tree
5753 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5754 tree type_in)
5756 char name[32];
5757 const char *suffix = NULL;
5758 tree fntype, new_fndecl, bdecl = NULL_TREE;
5759 int n_args = 1;
5760 const char *bname;
5761 machine_mode el_mode, in_mode;
5762 int n, in_n;
5764 /* Libmass is suitable for unsafe math only, as it does not correctly support
5765 parts of IEEE with the required precision, such as denormals.  Only support
5766 it if we have VSX, to use the simd d2 or f4 functions.
5767 XXX: Add variable length support. */
5768 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5769 return NULL_TREE;
5771 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5772 n = TYPE_VECTOR_SUBPARTS (type_out);
5773 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5774 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5775 if (el_mode != in_mode
5776 || n != in_n)
5777 return NULL_TREE;
5779 switch (fn)
5781 CASE_CFN_ATAN2:
5782 CASE_CFN_HYPOT:
5783 CASE_CFN_POW:
5784 n_args = 2;
5785 gcc_fallthrough ();
5787 CASE_CFN_ACOS:
5788 CASE_CFN_ACOSH:
5789 CASE_CFN_ASIN:
5790 CASE_CFN_ASINH:
5791 CASE_CFN_ATAN:
5792 CASE_CFN_ATANH:
5793 CASE_CFN_CBRT:
5794 CASE_CFN_COS:
5795 CASE_CFN_COSH:
5796 CASE_CFN_ERF:
5797 CASE_CFN_ERFC:
5798 CASE_CFN_EXP2:
5799 CASE_CFN_EXP:
5800 CASE_CFN_EXPM1:
5801 CASE_CFN_LGAMMA:
5802 CASE_CFN_LOG10:
5803 CASE_CFN_LOG1P:
5804 CASE_CFN_LOG2:
5805 CASE_CFN_LOG:
5806 CASE_CFN_SIN:
5807 CASE_CFN_SINH:
5808 CASE_CFN_SQRT:
5809 CASE_CFN_TAN:
5810 CASE_CFN_TANH:
5811 if (el_mode == DFmode && n == 2)
5813 bdecl = mathfn_built_in (double_type_node, fn);
5814 suffix = "d2"; /* pow -> powd2 */
5816 else if (el_mode == SFmode && n == 4)
5818 bdecl = mathfn_built_in (float_type_node, fn);
5819 suffix = "4"; /* powf -> powf4 */
5821 else
5822 return NULL_TREE;
5823 if (!bdecl)
5824 return NULL_TREE;
5825 break;
5827 default:
5828 return NULL_TREE;
5831 gcc_assert (suffix != NULL);
5832 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5833 if (!bname)
5834 return NULL_TREE;
5836 strcpy (name, bname + sizeof ("__builtin_") - 1);
5837 strcat (name, suffix);
5839 if (n_args == 1)
5840 fntype = build_function_type_list (type_out, type_in, NULL);
5841 else if (n_args == 2)
5842 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5843 else
5844 gcc_unreachable ();
5846 /* Build a function declaration for the vectorized function. */
5847 new_fndecl = build_decl (BUILTINS_LOCATION,
5848 FUNCTION_DECL, get_identifier (name), fntype);
5849 TREE_PUBLIC (new_fndecl) = 1;
5850 DECL_EXTERNAL (new_fndecl) = 1;
5851 DECL_IS_NOVOPS (new_fndecl) = 1;
5852 TREE_READONLY (new_fndecl) = 1;
5854 return new_fndecl;
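/* Hedged worked example, not in the original source: for CFN_POW with
   V2DF in and out, bdecl is __builtin_pow, so the strcpy/strcat pair
   above produces the name "powd2"; the V4SF variant produces "powf4"
   from __builtin_powf, matching the "pow -> powd2" and
   "powf -> powf4" notes above.  */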
5857 /* Returns a function decl for a vectorized version of the builtin function
5858 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5859 if it is not available. */
5861 static tree
5862 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5863 tree type_in)
5865 machine_mode in_mode, out_mode;
5866 int in_n, out_n;
5868 if (TARGET_DEBUG_BUILTIN)
5869 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5870 combined_fn_name (combined_fn (fn)),
5871 GET_MODE_NAME (TYPE_MODE (type_out)),
5872 GET_MODE_NAME (TYPE_MODE (type_in)));
5874 if (TREE_CODE (type_out) != VECTOR_TYPE
5875 || TREE_CODE (type_in) != VECTOR_TYPE)
5876 return NULL_TREE;
5878 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5879 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5880 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5881 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5883 switch (fn)
5885 CASE_CFN_COPYSIGN:
5886 if (VECTOR_UNIT_VSX_P (V2DFmode)
5887 && out_mode == DFmode && out_n == 2
5888 && in_mode == DFmode && in_n == 2)
5889 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5890 if (VECTOR_UNIT_VSX_P (V4SFmode)
5891 && out_mode == SFmode && out_n == 4
5892 && in_mode == SFmode && in_n == 4)
5893 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5894 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5895 && out_mode == SFmode && out_n == 4
5896 && in_mode == SFmode && in_n == 4)
5897 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5898 break;
5899 CASE_CFN_CEIL:
5900 if (VECTOR_UNIT_VSX_P (V2DFmode)
5901 && out_mode == DFmode && out_n == 2
5902 && in_mode == DFmode && in_n == 2)
5903 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5904 if (VECTOR_UNIT_VSX_P (V4SFmode)
5905 && out_mode == SFmode && out_n == 4
5906 && in_mode == SFmode && in_n == 4)
5907 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5908 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5909 && out_mode == SFmode && out_n == 4
5910 && in_mode == SFmode && in_n == 4)
5911 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5912 break;
5913 CASE_CFN_FLOOR:
5914 if (VECTOR_UNIT_VSX_P (V2DFmode)
5915 && out_mode == DFmode && out_n == 2
5916 && in_mode == DFmode && in_n == 2)
5917 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5918 if (VECTOR_UNIT_VSX_P (V4SFmode)
5919 && out_mode == SFmode && out_n == 4
5920 && in_mode == SFmode && in_n == 4)
5921 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5922 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5923 && out_mode == SFmode && out_n == 4
5924 && in_mode == SFmode && in_n == 4)
5925 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5926 break;
5927 CASE_CFN_FMA:
5928 if (VECTOR_UNIT_VSX_P (V2DFmode)
5929 && out_mode == DFmode && out_n == 2
5930 && in_mode == DFmode && in_n == 2)
5931 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5932 if (VECTOR_UNIT_VSX_P (V4SFmode)
5933 && out_mode == SFmode && out_n == 4
5934 && in_mode == SFmode && in_n == 4)
5935 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5936 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5937 && out_mode == SFmode && out_n == 4
5938 && in_mode == SFmode && in_n == 4)
5939 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5940 break;
5941 CASE_CFN_TRUNC:
5942 if (VECTOR_UNIT_VSX_P (V2DFmode)
5943 && out_mode == DFmode && out_n == 2
5944 && in_mode == DFmode && in_n == 2)
5945 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5946 if (VECTOR_UNIT_VSX_P (V4SFmode)
5947 && out_mode == SFmode && out_n == 4
5948 && in_mode == SFmode && in_n == 4)
5949 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5950 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5951 && out_mode == SFmode && out_n == 4
5952 && in_mode == SFmode && in_n == 4)
5953 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5954 break;
5955 CASE_CFN_NEARBYINT:
5956 if (VECTOR_UNIT_VSX_P (V2DFmode)
5957 && flag_unsafe_math_optimizations
5958 && out_mode == DFmode && out_n == 2
5959 && in_mode == DFmode && in_n == 2)
5960 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5961 if (VECTOR_UNIT_VSX_P (V4SFmode)
5962 && flag_unsafe_math_optimizations
5963 && out_mode == SFmode && out_n == 4
5964 && in_mode == SFmode && in_n == 4)
5965 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5966 break;
5967 CASE_CFN_RINT:
5968 if (VECTOR_UNIT_VSX_P (V2DFmode)
5969 && !flag_trapping_math
5970 && out_mode == DFmode && out_n == 2
5971 && in_mode == DFmode && in_n == 2)
5972 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5973 if (VECTOR_UNIT_VSX_P (V4SFmode)
5974 && !flag_trapping_math
5975 && out_mode == SFmode && out_n == 4
5976 && in_mode == SFmode && in_n == 4)
5977 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5978 break;
5979 default:
5980 break;
5983 /* Generate calls to libmass if appropriate. */
5984 if (rs6000_veclib_handler)
5985 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5987 return NULL_TREE;
5990 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5992 static tree
5993 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5994 tree type_in)
5996 machine_mode in_mode, out_mode;
5997 int in_n, out_n;
5999 if (TARGET_DEBUG_BUILTIN)
6000 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6001 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6002 GET_MODE_NAME (TYPE_MODE (type_out)),
6003 GET_MODE_NAME (TYPE_MODE (type_in)));
6005 if (TREE_CODE (type_out) != VECTOR_TYPE
6006 || TREE_CODE (type_in) != VECTOR_TYPE)
6007 return NULL_TREE;
6009 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6010 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6011 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6012 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6014 enum rs6000_builtins fn
6015 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6016 switch (fn)
6018 case RS6000_BUILTIN_RSQRTF:
6019 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6020 && out_mode == SFmode && out_n == 4
6021 && in_mode == SFmode && in_n == 4)
6022 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6023 break;
6024 case RS6000_BUILTIN_RSQRT:
6025 if (VECTOR_UNIT_VSX_P (V2DFmode)
6026 && out_mode == DFmode && out_n == 2
6027 && in_mode == DFmode && in_n == 2)
6028 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6029 break;
6030 case RS6000_BUILTIN_RECIPF:
6031 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6032 && out_mode == SFmode && out_n == 4
6033 && in_mode == SFmode && in_n == 4)
6034 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6035 break;
6036 case RS6000_BUILTIN_RECIP:
6037 if (VECTOR_UNIT_VSX_P (V2DFmode)
6038 && out_mode == DFmode && out_n == 2
6039 && in_mode == DFmode && in_n == 2)
6040 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6041 break;
6042 default:
6043 break;
6045 return NULL_TREE;
6048 /* Default CPU string for rs6000*_file_start functions. */
6049 static const char *rs6000_default_cpu;
6051 /* Do anything needed at the start of the asm file. */
6053 static void
6054 rs6000_file_start (void)
6056 char buffer[80];
6057 const char *start = buffer;
6058 FILE *file = asm_out_file;
6060 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6062 default_file_start ();
6064 if (flag_verbose_asm)
6066 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6068 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6070 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6071 start = "";
6074 if (global_options_set.x_rs6000_cpu_index)
6076 fprintf (file, "%s -mcpu=%s", start,
6077 processor_target_table[rs6000_cpu_index].name);
6078 start = "";
6081 if (global_options_set.x_rs6000_tune_index)
6083 fprintf (file, "%s -mtune=%s", start,
6084 processor_target_table[rs6000_tune_index].name);
6085 start = "";
6088 if (PPC405_ERRATUM77)
6090 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6091 start = "";
6094 #ifdef USING_ELFOS_H
6095 switch (rs6000_sdata)
6097 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6098 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6099 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6100 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6103 if (rs6000_sdata && g_switch_value)
6105 fprintf (file, "%s -G %d", start,
6106 g_switch_value);
6107 start = "";
6109 #endif
6111 if (*start == '\0')
6112 putc ('\n', file);
6115 #ifdef USING_ELFOS_H
6116 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6117 && !global_options_set.x_rs6000_cpu_index)
6119 fputs ("\t.machine ", asm_out_file);
6120 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6121 fputs ("power9\n", asm_out_file);
6122 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6123 fputs ("power8\n", asm_out_file);
6124 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6125 fputs ("power7\n", asm_out_file);
6126 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6127 fputs ("power6\n", asm_out_file);
6128 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6129 fputs ("power5\n", asm_out_file);
6130 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6131 fputs ("power4\n", asm_out_file);
6132 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6133 fputs ("ppc64\n", asm_out_file);
6134 else
6135 fputs ("ppc\n", asm_out_file);
6137 #endif
6139 if (DEFAULT_ABI == ABI_ELFv2)
6140 fprintf (file, "\t.abiversion 2\n");
6144 /* Return nonzero if this function is known to have a null epilogue. */
6146 int
6147 direct_return (void)
6149 if (reload_completed)
6151 rs6000_stack_t *info = rs6000_stack_info ();
6153 if (info->first_gp_reg_save == 32
6154 && info->first_fp_reg_save == 64
6155 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6156 && ! info->lr_save_p
6157 && ! info->cr_save_p
6158 && info->vrsave_size == 0
6159 && ! info->push_p)
6160 return 1;
6163 return 0;
6166 /* Return the number of instructions it takes to form a constant in an
6167 integer register. */
6169 int
6170 num_insns_constant_wide (HOST_WIDE_INT value)
6172 /* signed constant loadable with addi */
6173 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6174 return 1;
6176 /* constant loadable with addis */
6177 else if ((value & 0xffff) == 0
6178 && (value >> 31 == -1 || value >> 31 == 0))
6179 return 1;
6181 else if (TARGET_POWERPC64)
6183 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6184 HOST_WIDE_INT high = value >> 31;
6186 if (high == 0 || high == -1)
6187 return 2;
6189 high >>= 1;
6191 if (low == 0)
6192 return num_insns_constant_wide (high) + 1;
6193 else if (high == 0)
6194 return num_insns_constant_wide (low) + 1;
6195 else
6196 return (num_insns_constant_wide (high)
6197 + num_insns_constant_wide (low) + 1);
6200 else
6201 return 2;
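/* Hedged worked examples, not in the original source:
     0x7fff              -> 1 insn  (li)
     0x12340000          -> 1 insn  (lis)
     0x12345678          -> 2 insns (lis; ori)
     0x1234567800000000  -> 3 insns on 64-bit: build 0x12345678 in
                            2 insns, then 1 more to shift it into the
                            high part (the low == 0 case above).  */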
6204 int
6205 num_insns_constant (rtx op, machine_mode mode)
6207 HOST_WIDE_INT low, high;
6209 switch (GET_CODE (op))
6211 case CONST_INT:
6212 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6213 && rs6000_is_valid_and_mask (op, mode))
6214 return 2;
6215 else
6216 return num_insns_constant_wide (INTVAL (op));
6218 case CONST_WIDE_INT:
6220 int i;
6221 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6222 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6223 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6224 return ins;
6227 case CONST_DOUBLE:
6228 if (mode == SFmode || mode == SDmode)
6230 long l;
6232 if (DECIMAL_FLOAT_MODE_P (mode))
6233 REAL_VALUE_TO_TARGET_DECIMAL32
6234 (*CONST_DOUBLE_REAL_VALUE (op), l);
6235 else
6236 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6237 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6240 long l[2];
6241 if (DECIMAL_FLOAT_MODE_P (mode))
6242 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6243 else
6244 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6245 high = l[WORDS_BIG_ENDIAN == 0];
6246 low = l[WORDS_BIG_ENDIAN != 0];
6248 if (TARGET_32BIT)
6249 return (num_insns_constant_wide (low)
6250 + num_insns_constant_wide (high));
6251 else
6253 if ((high == 0 && low >= 0)
6254 || (high == -1 && low < 0))
6255 return num_insns_constant_wide (low);
6257 else if (rs6000_is_valid_and_mask (op, mode))
6258 return 2;
6260 else if (low == 0)
6261 return num_insns_constant_wide (high) + 1;
6263 else
6264 return (num_insns_constant_wide (high)
6265 + num_insns_constant_wide (low) + 1);
6268 default:
6269 gcc_unreachable ();
6273 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6274 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6275 corresponding element of the vector, but for V4SFmode and V2SFmode,
6276 the corresponding "float" is interpreted as an SImode integer. */
6278 HOST_WIDE_INT
6279 const_vector_elt_as_int (rtx op, unsigned int elt)
6281 rtx tmp;
6283 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6284 gcc_assert (GET_MODE (op) != V2DImode
6285 && GET_MODE (op) != V2DFmode);
6287 tmp = CONST_VECTOR_ELT (op, elt);
6288 if (GET_MODE (op) == V4SFmode
6289 || GET_MODE (op) == V2SFmode)
6290 tmp = gen_lowpart (SImode, tmp);
6291 return INTVAL (tmp);
6294 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6295 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6296 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6297 all items are set to the same value and contain COPIES replicas of the
6298 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6299 operand and the others are set to the value of the operand's msb. */
6301 static bool
6302 vspltis_constant (rtx op, unsigned step, unsigned copies)
6304 machine_mode mode = GET_MODE (op);
6305 machine_mode inner = GET_MODE_INNER (mode);
6307 unsigned i;
6308 unsigned nunits;
6309 unsigned bitsize;
6310 unsigned mask;
6312 HOST_WIDE_INT val;
6313 HOST_WIDE_INT splat_val;
6314 HOST_WIDE_INT msb_val;
6316 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6317 return false;
6319 nunits = GET_MODE_NUNITS (mode);
6320 bitsize = GET_MODE_BITSIZE (inner);
6321 mask = GET_MODE_MASK (inner);
6323 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6324 splat_val = val;
6325 msb_val = val >= 0 ? 0 : -1;
6327 /* Construct the value to be splatted, if possible. If not, return 0. */
6328 for (i = 2; i <= copies; i *= 2)
6330 HOST_WIDE_INT small_val;
6331 bitsize /= 2;
6332 small_val = splat_val >> bitsize;
6333 mask >>= bitsize;
6334 if (splat_val != ((HOST_WIDE_INT)
6335 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6336 | (small_val & mask)))
6337 return false;
6338 splat_val = small_val;
6341 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6342 if (EASY_VECTOR_15 (splat_val))
6345 /* Also check if we can splat, and then add the result to itself. Do so if
6346 the value is positive, or if the splat instruction is using OP's mode;
6347 for splat_val < 0, the splat and the add should use the same mode. */
6348 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6349 && (splat_val >= 0 || (step == 1 && copies == 1)))
6352 /* Also check if we are loading up the most significant bit, which can be done by
6353 loading up -1 and shifting the value left by -1. */
6354 else if (EASY_VECTOR_MSB (splat_val, inner))
6357 else
6358 return false;
6360 /* Check if VAL is present in every STEP-th element, and the
6361 other elements are filled with its most significant bit. */
6362 for (i = 1; i < nunits; ++i)
6364 HOST_WIDE_INT desired_val;
6365 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6366 if ((i & (step - 1)) == 0)
6367 desired_val = val;
6368 else
6369 desired_val = msb_val;
6371 if (desired_val != const_vector_elt_as_int (op, elt))
6372 return false;
6375 return true;
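/* Hedged worked example, not in the original source: the V8HImode
   vector {5, 5, 5, 5, 5, 5, 5, 5} is accepted with step == 1,
   copies == 1 (a plain vspltish 5), and the V4SImode vector with all
   elements 0x00050005 is the same splat viewed with copies == 2.  */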
6378 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6379 instruction, filling in the bottom elements with 0 or -1.
6381 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6382 for the number of zeroes to shift in, or negative for the number of 0xff
6383 bytes to shift in.
6385 OP is a CONST_VECTOR. */
6387 static int
6388 vspltis_shifted (rtx op)
6390 machine_mode mode = GET_MODE (op);
6391 machine_mode inner = GET_MODE_INNER (mode);
6393 unsigned i, j;
6394 unsigned nunits;
6395 unsigned mask;
6397 HOST_WIDE_INT val;
6399 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6400 return false;
6402 /* We need to create pseudo registers to do the shift, so don't recognize
6403 shift vector constants after reload. */
6404 if (!can_create_pseudo_p ())
6405 return false;
6407 nunits = GET_MODE_NUNITS (mode);
6408 mask = GET_MODE_MASK (inner);
6410 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6412 /* Check if the value can really be the operand of a vspltis[bhw]. */
6413 if (EASY_VECTOR_15 (val))
6416 /* Also check if we are loading up the most significant bit, which can be done
6417 by loading up -1 and shifting the value left by -1. */
6418 else if (EASY_VECTOR_MSB (val, inner))
6421 else
6422 return 0;
6424 /* Check if VAL is present in every STEP-th element until we find elements
6425 that are 0 or all 1 bits. */
6426 for (i = 1; i < nunits; ++i)
6428 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6429 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6431 /* If the value isn't the splat value, check for the remaining elements
6432 being 0/-1. */
6433 if (val != elt_val)
6435 if (elt_val == 0)
6437 for (j = i+1; j < nunits; ++j)
6439 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6440 if (const_vector_elt_as_int (op, elt2) != 0)
6441 return 0;
6444 return (nunits - i) * GET_MODE_SIZE (inner);
6447 else if ((elt_val & mask) == mask)
6449 for (j = i+1; j < nunits; ++j)
6451 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6452 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6453 return 0;
6456 return -((nunits - i) * GET_MODE_SIZE (inner));
6459 else
6460 return 0;
6464 /* If all elements are equal, we don't need to do VSLDOI. */
6465 return 0;
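/* Hedged worked example, not in the original source: in big-endian
   element order, the V4SImode vector {5, 0, 0, 0} is vspltisw 5
   shifted left with VSLDOI, so the routine returns 3 * 4 == 12 bytes
   of zeroes to shift in, while {5, -1, -1, -1} returns -12.  */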
6469 /* Return true if OP is of the given MODE and can be synthesized
6470 with a vspltisb, vspltish or vspltisw. */
6472 bool
6473 easy_altivec_constant (rtx op, machine_mode mode)
6475 unsigned step, copies;
6477 if (mode == VOIDmode)
6478 mode = GET_MODE (op);
6479 else if (mode != GET_MODE (op))
6480 return false;
6482 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6483 constants. */
6484 if (mode == V2DFmode)
6485 return zero_constant (op, mode);
6487 else if (mode == V2DImode)
6489 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6490 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6491 return false;
6493 if (zero_constant (op, mode))
6494 return true;
6496 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6497 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6498 return true;
6500 return false;
6503 /* V1TImode is a special container for TImode. Ignore for now. */
6504 else if (mode == V1TImode)
6505 return false;
6507 /* Start with a vspltisw. */
6508 step = GET_MODE_NUNITS (mode) / 4;
6509 copies = 1;
6511 if (vspltis_constant (op, step, copies))
6512 return true;
6514 /* Then try with a vspltish. */
6515 if (step == 1)
6516 copies <<= 1;
6517 else
6518 step >>= 1;
6520 if (vspltis_constant (op, step, copies))
6521 return true;
6523 /* And finally a vspltisb. */
6524 if (step == 1)
6525 copies <<= 1;
6526 else
6527 step >>= 1;
6529 if (vspltis_constant (op, step, copies))
6530 return true;
6532 if (vspltis_shifted (op) != 0)
6533 return true;
6535 return false;
6538 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6539 result is OP. Abort if it is not possible. */
6541 rtx
6542 gen_easy_altivec_constant (rtx op)
6544 machine_mode mode = GET_MODE (op);
6545 int nunits = GET_MODE_NUNITS (mode);
6546 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6547 unsigned step = nunits / 4;
6548 unsigned copies = 1;
6550 /* Start with a vspltisw. */
6551 if (vspltis_constant (op, step, copies))
6552 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6554 /* Then try with a vspltish. */
6555 if (step == 1)
6556 copies <<= 1;
6557 else
6558 step >>= 1;
6560 if (vspltis_constant (op, step, copies))
6561 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6563 /* And finally a vspltisb. */
6564 if (step == 1)
6565 copies <<= 1;
6566 else
6567 step >>= 1;
6569 if (vspltis_constant (op, step, copies))
6570 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6572 gcc_unreachable ();
6575 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6576 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6578 Return the number of instructions needed (1 or 2) via the address pointed
6579 to by NUM_INSNS_PTR.
6581 Return the constant that is being split via CONSTANT_PTR. */
6583 bool
6584 xxspltib_constant_p (rtx op,
6585 machine_mode mode,
6586 int *num_insns_ptr,
6587 int *constant_ptr)
6589 size_t nunits = GET_MODE_NUNITS (mode);
6590 size_t i;
6591 HOST_WIDE_INT value;
6592 rtx element;
6594 /* Set the returned values to out-of-bound values. */
6595 *num_insns_ptr = -1;
6596 *constant_ptr = 256;
6598 if (!TARGET_P9_VECTOR)
6599 return false;
6601 if (mode == VOIDmode)
6602 mode = GET_MODE (op);
6604 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6605 return false;
6607 /* Handle (vec_duplicate <constant>). */
6608 if (GET_CODE (op) == VEC_DUPLICATE)
6610 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6611 && mode != V2DImode)
6612 return false;
6614 element = XEXP (op, 0);
6615 if (!CONST_INT_P (element))
6616 return false;
6618 value = INTVAL (element);
6619 if (!IN_RANGE (value, -128, 127))
6620 return false;
6623 /* Handle (const_vector [...]). */
6624 else if (GET_CODE (op) == CONST_VECTOR)
6626 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6627 && mode != V2DImode)
6628 return false;
6630 element = CONST_VECTOR_ELT (op, 0);
6631 if (!CONST_INT_P (element))
6632 return false;
6634 value = INTVAL (element);
6635 if (!IN_RANGE (value, -128, 127))
6636 return false;
6638 for (i = 1; i < nunits; i++)
6640 element = CONST_VECTOR_ELT (op, i);
6641 if (!CONST_INT_P (element))
6642 return false;
6644 if (value != INTVAL (element))
6645 return false;
6649 /* Handle integer constants being loaded into the upper part of the VSX
6650 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6651 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6652 else if (CONST_INT_P (op))
6654 if (!SCALAR_INT_MODE_P (mode))
6655 return false;
6657 value = INTVAL (op);
6658 if (!IN_RANGE (value, -128, 127))
6659 return false;
6661 if (!IN_RANGE (value, -1, 0))
6663 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6664 return false;
6666 if (EASY_VECTOR_15 (value))
6667 return false;
6671 else
6672 return false;
6674 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6675 sign extend. Special case 0/-1 to allow getting any VSX register instead
6676 of an Altivec register. */
6677 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6678 && EASY_VECTOR_15 (value))
6679 return false;
6681 /* Return # of instructions and the constant byte for XXSPLTIB. */
6682 if (mode == V16QImode)
6683 *num_insns_ptr = 1;
6685 else if (IN_RANGE (value, -1, 0))
6686 *num_insns_ptr = 1;
6688 else
6689 *num_insns_ptr = 2;
6691 *constant_ptr = (int) value;
6692 return true;
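/* Hedged usage sketch, not part of GCC: querying the predicate the
   way output_vec_const_move does below.  A V16QImode splat of 100
   needs one insn (the xxspltib itself); a V8HImode splat of 100
   needs two (xxspltib plus a sign extension).  */
#if 0
static int
example_xxspltib_insns (rtx vec, machine_mode mode)
{
  int num_insns = -1, value = 256;
  if (xxspltib_constant_p (vec, mode, &num_insns, &value))
    return num_insns;
  return 0;
}
#endif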
6695 const char *
6696 output_vec_const_move (rtx *operands)
6698 int shift;
6699 machine_mode mode;
6700 rtx dest, vec;
6702 dest = operands[0];
6703 vec = operands[1];
6704 mode = GET_MODE (dest);
6706 if (TARGET_VSX)
6708 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6709 int xxspltib_value = 256;
6710 int num_insns = -1;
6712 if (zero_constant (vec, mode))
6714 if (TARGET_P9_VECTOR)
6715 return "xxspltib %x0,0";
6717 else if (dest_vmx_p)
6718 return "vspltisw %0,0";
6720 else
6721 return "xxlxor %x0,%x0,%x0";
6724 if (all_ones_constant (vec, mode))
6726 if (TARGET_P9_VECTOR)
6727 return "xxspltib %x0,255";
6729 else if (dest_vmx_p)
6730 return "vspltisw %0,-1";
6732 else if (TARGET_P8_VECTOR)
6733 return "xxlorc %x0,%x0,%x0";
6735 else
6736 gcc_unreachable ();
6739 if (TARGET_P9_VECTOR
6740 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6742 if (num_insns == 1)
6744 operands[2] = GEN_INT (xxspltib_value & 0xff);
6745 return "xxspltib %x0,%2";
6748 return "#";
6752 if (TARGET_ALTIVEC)
6754 rtx splat_vec;
6756 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6757 if (zero_constant (vec, mode))
6758 return "vspltisw %0,0";
6760 if (all_ones_constant (vec, mode))
6761 return "vspltisw %0,-1";
6763 /* Do we need to construct a value using VSLDOI? */
6764 shift = vspltis_shifted (vec);
6765 if (shift != 0)
6766 return "#";
6768 splat_vec = gen_easy_altivec_constant (vec);
6769 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6770 operands[1] = XEXP (splat_vec, 0);
6771 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6772 return "#";
6774 switch (GET_MODE (splat_vec))
6776 case E_V4SImode:
6777 return "vspltisw %0,%1";
6779 case E_V8HImode:
6780 return "vspltish %0,%1";
6782 case E_V16QImode:
6783 return "vspltisb %0,%1";
6785 default:
6786 gcc_unreachable ();
6790 gcc_unreachable ();
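/* Hedged worked examples, not in the original source: a zero vector
   is emitted as "xxspltib %x0,0" on ISA 3.0, "vspltisw %0,0" into an
   Altivec register, or "xxlxor %x0,%x0,%x0" otherwise; an all-ones
   vector becomes "xxspltib %x0,255", "vspltisw %0,-1" or
   "xxlorc %x0,%x0,%x0"; "#" requests a later split.  */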
6793 /* Initialize the paired-float vector TARGET to VALS. */
6795 void
6796 paired_expand_vector_init (rtx target, rtx vals)
6798 machine_mode mode = GET_MODE (target);
6799 int n_elts = GET_MODE_NUNITS (mode);
6800 int n_var = 0;
6801 rtx x, new_rtx, tmp, constant_op, op1, op2;
6802 int i;
6804 for (i = 0; i < n_elts; ++i)
6806 x = XVECEXP (vals, 0, i);
6807 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6808 ++n_var;
6810 if (n_var == 0)
6812 /* Load from constant pool. */
6813 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6814 return;
6817 if (n_var == 2)
6819 /* The vector is initialized only with non-constants. */
6820 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6821 XVECEXP (vals, 0, 1));
6823 emit_move_insn (target, new_rtx);
6824 return;
6827 /* One field is non-constant and the other one is a constant. Load the
6828 constant from the constant pool and use the ps_merge instruction to
6829 construct the whole vector. */
6830 op1 = XVECEXP (vals, 0, 0);
6831 op2 = XVECEXP (vals, 0, 1);
6833 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6835 tmp = gen_reg_rtx (GET_MODE (constant_op));
6836 emit_move_insn (tmp, constant_op);
6838 if (CONSTANT_P (op1))
6839 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6840 else
6841 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6843 emit_move_insn (target, new_rtx);
6846 void
6847 paired_expand_vector_move (rtx operands[])
6849 rtx op0 = operands[0], op1 = operands[1];
6851 emit_move_insn (op0, op1);
6854 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6855 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6856 operands for the relation operation COND. This is a recursive
6857 function. */
6859 static void
6860 paired_emit_vector_compare (enum rtx_code rcode,
6861 rtx dest, rtx op0, rtx op1,
6862 rtx cc_op0, rtx cc_op1)
6864 rtx tmp = gen_reg_rtx (V2SFmode);
6865 rtx tmp1, max, min;
6867 gcc_assert (TARGET_PAIRED_FLOAT);
6868 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6870 switch (rcode)
6872 case LT:
6873 case LTU:
6874 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6875 return;
6876 case GE:
6877 case GEU:
6878 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6879 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6880 return;
6881 case LE:
6882 case LEU:
6883 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6884 return;
6885 case GT:
6886 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6887 return;
6888 case EQ:
6889 tmp1 = gen_reg_rtx (V2SFmode);
6890 max = gen_reg_rtx (V2SFmode);
6891 min = gen_reg_rtx (V2SFmode);
6892 gen_reg_rtx (V2SFmode);
6894 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6895 emit_insn (gen_selv2sf4
6896 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6897 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6898 emit_insn (gen_selv2sf4
6899 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6900 emit_insn (gen_subv2sf3 (tmp1, min, max));
6901 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6902 return;
6903 case NE:
6904 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6905 return;
6906 case UNLE:
6907 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6908 return;
6909 case UNLT:
6910 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6911 return;
6912 case UNGE:
6913 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6914 return;
6915 case UNGT:
6916 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6917 return;
6918 default:
6919 gcc_unreachable ();
6922 return;
6925 /* Emit vector conditional expression.
6926 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6927 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6929 int
6930 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6931 rtx cond, rtx cc_op0, rtx cc_op1)
6933 enum rtx_code rcode = GET_CODE (cond);
6935 if (!TARGET_PAIRED_FLOAT)
6936 return 0;
6938 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6940 return 1;
6943 /* Initialize vector TARGET to VALS. */
6945 void
6946 rs6000_expand_vector_init (rtx target, rtx vals)
6948 machine_mode mode = GET_MODE (target);
6949 machine_mode inner_mode = GET_MODE_INNER (mode);
6950 int n_elts = GET_MODE_NUNITS (mode);
6951 int n_var = 0, one_var = -1;
6952 bool all_same = true, all_const_zero = true;
6953 rtx x, mem;
6954 int i;
6956 for (i = 0; i < n_elts; ++i)
6958 x = XVECEXP (vals, 0, i);
6959 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6960 ++n_var, one_var = i;
6961 else if (x != CONST0_RTX (inner_mode))
6962 all_const_zero = false;
6964 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6965 all_same = false;
6968 if (n_var == 0)
6970 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6971 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6972 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6974 /* Zero register. */
6975 emit_move_insn (target, CONST0_RTX (mode));
6976 return;
6978 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6980 /* Splat immediate. */
6981 emit_insn (gen_rtx_SET (target, const_vec));
6982 return;
6984 else
6986 /* Load from constant pool. */
6987 emit_move_insn (target, const_vec);
6988 return;
6992 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6993 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6995 rtx op[2];
6996 size_t i;
6997 size_t num_elements = all_same ? 1 : 2;
6998 for (i = 0; i < num_elements; i++)
7000 op[i] = XVECEXP (vals, 0, i);
7001 /* Just in case there is a SUBREG with a smaller mode, do a
7002 conversion. */
7003 if (GET_MODE (op[i]) != inner_mode)
7005 rtx tmp = gen_reg_rtx (inner_mode);
7006 convert_move (tmp, op[i], 0);
7007 op[i] = tmp;
7009 /* Allow load with splat double word. */
7010 else if (MEM_P (op[i]))
7012 if (!all_same)
7013 op[i] = force_reg (inner_mode, op[i]);
7015 else if (!REG_P (op[i]))
7016 op[i] = force_reg (inner_mode, op[i]);
7019 if (all_same)
7021 if (mode == V2DFmode)
7022 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7023 else
7024 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7026 else
7028 if (mode == V2DFmode)
7029 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7030 else
7031 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7033 return;
7036 /* Special case initializing vector int if we are on 64-bit systems with
7037 direct move or we have the ISA 3.0 instructions. */
7038 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7039 && TARGET_DIRECT_MOVE_64BIT)
7041 if (all_same)
7043 rtx element0 = XVECEXP (vals, 0, 0);
7044 if (MEM_P (element0))
7045 element0 = rs6000_address_for_fpconvert (element0);
7046 else
7047 element0 = force_reg (SImode, element0);
7049 if (TARGET_P9_VECTOR)
7050 emit_insn (gen_vsx_splat_v4si (target, element0));
7051 else
7053 rtx tmp = gen_reg_rtx (DImode);
7054 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7055 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7057 return;
7059 else
7061 rtx elements[4];
7062 size_t i;
7064 for (i = 0; i < 4; i++)
7066 elements[i] = XVECEXP (vals, 0, i);
7067 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7068 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7071 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7072 elements[2], elements[3]));
7073 return;
7077 /* With single-precision floating point on VSX, internally single precision
7078 is actually represented as a double.  Either make 2 V2DF vectors and
7079 convert these vectors to single precision, or do one conversion and
7080 splat the result to the other elements. */
7081 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7083 if (all_same)
7085 rtx element0 = XVECEXP (vals, 0, 0);
7087 if (TARGET_P9_VECTOR)
7089 if (MEM_P (element0))
7090 element0 = rs6000_address_for_fpconvert (element0);
7092 emit_insn (gen_vsx_splat_v4sf (target, element0));
7095 else
7097 rtx freg = gen_reg_rtx (V4SFmode);
7098 rtx sreg = force_reg (SFmode, element0);
7099 rtx cvt = (TARGET_XSCVDPSPN
7100 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7101 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7103 emit_insn (cvt);
7104 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7105 const0_rtx));
7108 else
7110 rtx dbl_even = gen_reg_rtx (V2DFmode);
7111 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7112 rtx flt_even = gen_reg_rtx (V4SFmode);
7113 rtx flt_odd = gen_reg_rtx (V4SFmode);
7114 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7115 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7116 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7117 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7119 /* Use VMRGEW if we can instead of doing a permute. */
7120 if (TARGET_P8_VECTOR)
7122 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7123 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7124 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7125 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7126 if (BYTES_BIG_ENDIAN)
7127 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7128 else
7129 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7131 else
7133 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7134 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7135 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7136 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7137 rs6000_expand_extract_even (target, flt_even, flt_odd);
7140 return;
7143 /* Special case splat initialization of vector short/char if we are on
7144 64-bit systems with direct move. */
7145 if (all_same && TARGET_DIRECT_MOVE_64BIT
7146 && (mode == V16QImode || mode == V8HImode))
7148 rtx op0 = XVECEXP (vals, 0, 0);
7149 rtx di_tmp = gen_reg_rtx (DImode);
7151 if (!REG_P (op0))
7152 op0 = force_reg (GET_MODE_INNER (mode), op0);
7154 if (mode == V16QImode)
7156 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7157 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7158 return;
7161 if (mode == V8HImode)
7163 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7164 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7165 return;
7169 /* Store value to stack temp. Load vector element. Splat. However, splat
7170 of 64-bit items is not supported on Altivec. */
7171 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7173 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7174 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7175 XVECEXP (vals, 0, 0));
7176 x = gen_rtx_UNSPEC (VOIDmode,
7177 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7178 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7179 gen_rtvec (2,
7180 gen_rtx_SET (target, mem),
7181 x)));
7182 x = gen_rtx_VEC_SELECT (inner_mode, target,
7183 gen_rtx_PARALLEL (VOIDmode,
7184 gen_rtvec (1, const0_rtx)));
7185 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7186 return;
7189 /* One field is non-constant. Load constant then overwrite
7190 varying field. */
7191 if (n_var == 1)
7193 rtx copy = copy_rtx (vals);
7195 /* Load constant part of vector, substitute neighboring value for
7196 varying element. */
7197 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7198 rs6000_expand_vector_init (target, copy);
7200 /* Insert variable. */
7201 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7202 return;
7205 /* Construct the vector in memory one field at a time
7206 and load the whole vector. */
7207 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7208 for (i = 0; i < n_elts; i++)
7209 emit_move_insn (adjust_address_nv (mem, inner_mode,
7210 i * GET_MODE_SIZE (inner_mode)),
7211 XVECEXP (vals, 0, i));
7212 emit_move_insn (target, mem);
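/* Editorial sketch, not part of rs6000.c: the kind of GNU C vector
   initializers the expander above lowers, compiled for a VSX target.
   Function and type names here are illustrative only.  */

typedef double v2df __attribute__ ((vector_size (16)));
typedef int v4si __attribute__ ((vector_size (16)));

v2df
build_v2df (double a, double b)
{
  /* Two variable doubles: handled by the V2DFmode path above via
     vsx_concat_v2df (xxpermdi).  */
  return (v2df) { a, b };
}

v4si
build_v4si_splat (int x)
{
  /* All elements identical: the V4SImode splat path (mtvsrws on ISA
     3.0, otherwise zero-extend plus a doubleword-based splat).  */
  return (v4si) { x, x, x, x };
}

v4si
build_v4si_one_var (int x)
{
  /* n_var == 1: the constant part is loaded first, then
     rs6000_expand_vector_set inserts the variable element.  */
  return (v4si) { 1, 2, x, 4 };
}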
7215 /* Set field ELT of TARGET to VAL. */
7217 void
7218 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7220 machine_mode mode = GET_MODE (target);
7221 machine_mode inner_mode = GET_MODE_INNER (mode);
7222 rtx reg = gen_reg_rtx (mode);
7223 rtx mask, mem, x;
7224 int width = GET_MODE_SIZE (inner_mode);
7225 int i;
7227 val = force_reg (GET_MODE (val), val);
7229 if (VECTOR_MEM_VSX_P (mode))
7231 rtx insn = NULL_RTX;
7232 rtx elt_rtx = GEN_INT (elt);
7234 if (mode == V2DFmode)
7235 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7237 else if (mode == V2DImode)
7238 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7240 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7242 if (mode == V4SImode)
7243 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7244 else if (mode == V8HImode)
7245 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7246 else if (mode == V16QImode)
7247 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7248 else if (mode == V4SFmode)
7249 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7252 if (insn)
7254 emit_insn (insn);
7255 return;
7259 /* Simplify setting single element vectors like V1TImode. */
7260 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7262 emit_move_insn (target, gen_lowpart (mode, val));
7263 return;
7266 /* Load single variable value. */
7267 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7268 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7269 x = gen_rtx_UNSPEC (VOIDmode,
7270 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7271 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7272 gen_rtvec (2,
7273 gen_rtx_SET (reg, mem),
7274 x)));
7276 /* Linear sequence. */
7277 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7278 for (i = 0; i < 16; ++i)
7279 XVECEXP (mask, 0, i) = GEN_INT (i);
7281 /* Set permute mask to insert element into target. */
7282 for (i = 0; i < width; ++i)
7283 XVECEXP (mask, 0, elt*width + i)
7284 = GEN_INT (i + 0x10);
7285 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7287 if (BYTES_BIG_ENDIAN)
7288 x = gen_rtx_UNSPEC (mode,
7289 gen_rtvec (3, target, reg,
7290 force_reg (V16QImode, x)),
7291 UNSPEC_VPERM);
7292 else
7294 if (TARGET_P9_VECTOR)
7295 x = gen_rtx_UNSPEC (mode,
7296 gen_rtvec (3, reg, target,
7297 force_reg (V16QImode, x)),
7298 UNSPEC_VPERMR);
7299 else
7301 /* Invert selector. We prefer to generate VNAND on P8 so
7302 that future fusion opportunities can kick in, but must
7303 generate VNOR elsewhere. */
7304 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7305 rtx iorx = (TARGET_P8_VECTOR
7306 ? gen_rtx_IOR (V16QImode, notx, notx)
7307 : gen_rtx_AND (V16QImode, notx, notx));
7308 rtx tmp = gen_reg_rtx (V16QImode);
7309 emit_insn (gen_rtx_SET (tmp, iorx));
7311 /* Permute with operands reversed and adjusted selector. */
7312 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7313 UNSPEC_VPERM);
7317 emit_insn (gen_rtx_SET (target, x));
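/* Editorial sketch, not part of rs6000.c: user-level code that reaches
   rs6000_expand_vector_set, using the AltiVec intrinsics from altivec.h.
   Assumes -maltivec/-mvsx; the function name is illustrative only.  */

#include <altivec.h>

__vector signed int
set_element_2 (__vector signed int v, signed int val)
{
  /* Constant position: on ISA 3.0 with 64-bit support this maps to the
     single-insn vsx_set_v4si_p9 pattern; otherwise the store/reload and
     vperm fallback above is generated.  */
  return vec_insert (val, v, 2);
}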
7320 /* Extract field ELT from VEC into TARGET. */
7322 void
7323 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7325 machine_mode mode = GET_MODE (vec);
7326 machine_mode inner_mode = GET_MODE_INNER (mode);
7327 rtx mem;
7329 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7331 switch (mode)
7333 default:
7334 break;
7335 case E_V1TImode:
7336 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7337 emit_move_insn (target, gen_lowpart (TImode, vec));
7338 break;
7339 case E_V2DFmode:
7340 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7341 return;
7342 case E_V2DImode:
7343 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7344 return;
7345 case E_V4SFmode:
7346 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7347 return;
7348 case E_V16QImode:
7349 if (TARGET_DIRECT_MOVE_64BIT)
7351 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7352 return;
7354 else
7355 break;
7356 case E_V8HImode:
7357 if (TARGET_DIRECT_MOVE_64BIT)
7359 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7360 return;
7362 else
7363 break;
7364 case E_V4SImode:
7365 if (TARGET_DIRECT_MOVE_64BIT)
7367 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7368 return;
7370 break;
7373 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7374 && TARGET_DIRECT_MOVE_64BIT)
7376 if (GET_MODE (elt) != DImode)
7378 rtx tmp = gen_reg_rtx (DImode);
7379 convert_move (tmp, elt, 0);
7380 elt = tmp;
7382 else if (!REG_P (elt))
7383 elt = force_reg (DImode, elt);
7385 switch (mode)
7387 case E_V2DFmode:
7388 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7389 return;
7391 case E_V2DImode:
7392 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7393 return;
7395 case E_V4SFmode:
7396 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7397 return;
7399 case E_V4SImode:
7400 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7401 return;
7403 case E_V8HImode:
7404 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7405 return;
7407 case E_V16QImode:
7408 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7409 return;
7411 default:
7412 gcc_unreachable ();
7416 gcc_assert (CONST_INT_P (elt));
7418 /* Allocate mode-sized buffer. */
7419 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7421 emit_move_insn (mem, vec);
7423 /* Add offset to field within buffer matching vector element. */
7424 mem = adjust_address_nv (mem, inner_mode,
7425 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7427 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
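/* Editorial sketch, not part of rs6000.c: both forms handled by the
   expander above, written with altivec.h intrinsics.  Assumes -mvsx on
   a 64-bit target; function names are illustrative only.  */

#include <altivec.h>

signed int
extract_const_elt (__vector signed int v)
{
  return vec_extract (v, 1);	/* CONST_INT_P (elt) path above.  */
}

signed int
extract_var_elt (__vector signed int v, int n)
{
  return vec_extract (v, n);	/* variable path: vsx_extract_v4si_var.  */
}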
7430 /* Helper function to return the register number of an RTX. */
7431 static inline int
7432 regno_or_subregno (rtx op)
7434 if (REG_P (op))
7435 return REGNO (op);
7436 else if (SUBREG_P (op))
7437 return subreg_regno (op);
7438 else
7439 gcc_unreachable ();
7442 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7443 within the vector (ELEMENT) with a mode (SCALAR_MODE).  Use a base register
7444 temporary (BASE_TMP) to fix up the address.  Return the new memory address
7445 that is valid for reads or writes to a given register (SCALAR_REG). */
7447 static rtx
7448 rs6000_adjust_vec_address (rtx scalar_reg,
7449 rtx mem,
7450 rtx element,
7451 rtx base_tmp,
7452 machine_mode scalar_mode)
7454 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7455 rtx addr = XEXP (mem, 0);
7456 rtx element_offset;
7457 rtx new_addr;
7458 bool valid_addr_p;
7460 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7461 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7463 /* Calculate what we need to add to the address to get the element
7464 address. */
7465 if (CONST_INT_P (element))
7466 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7467 else
7469 int byte_shift = exact_log2 (scalar_size);
7470 gcc_assert (byte_shift >= 0);
7472 if (byte_shift == 0)
7473 element_offset = element;
7475 else
7477 if (TARGET_POWERPC64)
7478 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7479 else
7480 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7482 element_offset = base_tmp;
7486 /* Create the new address pointing to the element within the vector. If we
7487 are adding 0, we don't have to change the address. */
7488 if (element_offset == const0_rtx)
7489 new_addr = addr;
7491 /* A simple indirect address can be converted into a reg + offset
7492 address. */
7493 else if (REG_P (addr) || SUBREG_P (addr))
7494 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7496 /* Optimize D-FORM addresses with constant offset with a constant element, to
7497 include the element offset in the address directly. */
7498 else if (GET_CODE (addr) == PLUS)
7500 rtx op0 = XEXP (addr, 0);
7501 rtx op1 = XEXP (addr, 1);
7502 rtx insn;
7504 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7505 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7507 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7508 rtx offset_rtx = GEN_INT (offset);
7510 if (IN_RANGE (offset, -32768, 32767)
7511 && (scalar_size < 8 || (offset & 0x3) == 0))
7512 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7513 else
7515 emit_move_insn (base_tmp, offset_rtx);
7516 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7519 else
7521 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7522 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7524 /* Note, ADDI requires the register being added to be a base
7525 register. If the register was R0, load it up into the temporary
7526 and do the add. */
7527 if (op1_reg_p
7528 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7530 insn = gen_add3_insn (base_tmp, op1, element_offset);
7531 gcc_assert (insn != NULL_RTX);
7532 emit_insn (insn);
7535 else if (ele_reg_p
7536 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7538 insn = gen_add3_insn (base_tmp, element_offset, op1);
7539 gcc_assert (insn != NULL_RTX);
7540 emit_insn (insn);
7543 else
7545 emit_move_insn (base_tmp, op1);
7546 emit_insn (gen_add2_insn (base_tmp, element_offset));
7549 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7553 else
7555 emit_move_insn (base_tmp, addr);
7556 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7559 /* If we have a PLUS, we need to see whether the particular register class
7560 allows for D-FORM or X-FORM addressing. */
7561 if (GET_CODE (new_addr) == PLUS)
7563 rtx op1 = XEXP (new_addr, 1);
7564 addr_mask_type addr_mask;
7565 int scalar_regno = regno_or_subregno (scalar_reg);
7567 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7568 if (INT_REGNO_P (scalar_regno))
7569 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7571 else if (FP_REGNO_P (scalar_regno))
7572 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7574 else if (ALTIVEC_REGNO_P (scalar_regno))
7575 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7577 else
7578 gcc_unreachable ();
7580 if (REG_P (op1) || SUBREG_P (op1))
7581 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7582 else
7583 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7586 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7587 valid_addr_p = true;
7589 else
7590 valid_addr_p = false;
7592 if (!valid_addr_p)
7594 emit_move_insn (base_tmp, new_addr);
7595 new_addr = base_tmp;
7598 return change_address (mem, scalar_mode, new_addr);
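/* Editorial sketch, not part of rs6000.c: the reg+const offset folding
   above, restated on plain integers.  The combined displacement may
   stay in the address only if it fits the signed 16-bit D-form field
   and, for 8-byte scalars, keeps the low two bits clear; otherwise it
   must be moved into a base register first.  */

#include <stdbool.h>

static bool
dform_offset_ok (long base_offset, long element, unsigned scalar_size)
{
  long offset = base_offset + element * (long) scalar_size;
  return offset >= -32768 && offset <= 32767
	 && (scalar_size < 8 || (offset & 0x3) == 0);
}

/* e.g. dform_offset_ok (32, 2, 8) is true (offset 48), while
   dform_offset_ok (32760, 8, 8) is false (offset 32824 overflows).  */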
7601 /* Split a variable vec_extract operation into the component instructions. */
7603 void
7604 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7605 rtx tmp_altivec)
7607 machine_mode mode = GET_MODE (src);
7608 machine_mode scalar_mode = GET_MODE (dest);
7609 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7610 int byte_shift = exact_log2 (scalar_size);
7612 gcc_assert (byte_shift >= 0);
7614 /* If we are given a memory address, optimize to load just the element. We
7615 don't have to adjust the vector element number on little endian
7616 systems. */
7617 if (MEM_P (src))
7619 gcc_assert (REG_P (tmp_gpr));
7620 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7621 tmp_gpr, scalar_mode));
7622 return;
7625 else if (REG_P (src) || SUBREG_P (src))
7627 int bit_shift = byte_shift + 3;
7628 rtx element2;
7629 int dest_regno = regno_or_subregno (dest);
7630 int src_regno = regno_or_subregno (src);
7631 int element_regno = regno_or_subregno (element);
7633 gcc_assert (REG_P (tmp_gpr));
7635 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7636 a general purpose register. */
7637 if (TARGET_P9_VECTOR
7638 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7639 && INT_REGNO_P (dest_regno)
7640 && ALTIVEC_REGNO_P (src_regno)
7641 && INT_REGNO_P (element_regno))
7643 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7644 rtx element_si = gen_rtx_REG (SImode, element_regno);
7646 if (mode == V16QImode)
7647 emit_insn (VECTOR_ELT_ORDER_BIG
7648 ? gen_vextublx (dest_si, element_si, src)
7649 : gen_vextubrx (dest_si, element_si, src));
7651 else if (mode == V8HImode)
7653 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7654 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7655 emit_insn (VECTOR_ELT_ORDER_BIG
7656 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7657 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7661 else
7663 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7664 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7665 emit_insn (VECTOR_ELT_ORDER_BIG
7666 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7667 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7670 return;
7674 gcc_assert (REG_P (tmp_altivec));
7676 /* For little endian, adjust the element ordering.  For V2DI/V2DF we can
7677 use an XOR; otherwise we need to subtract.  The shift amount is chosen
7678 so that VSLO will shift the element into the upper position (adding 3
7679 converts a byte shift into a bit shift). */
7680 if (scalar_size == 8)
7682 if (!VECTOR_ELT_ORDER_BIG)
7684 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7685 element2 = tmp_gpr;
7687 else
7688 element2 = element;
7690 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7691 bit. */
7692 emit_insn (gen_rtx_SET (tmp_gpr,
7693 gen_rtx_AND (DImode,
7694 gen_rtx_ASHIFT (DImode,
7695 element2,
7696 GEN_INT (6)),
7697 GEN_INT (64))));
7699 else
7701 if (!VECTOR_ELT_ORDER_BIG)
7703 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7705 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7706 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7707 element2 = tmp_gpr;
7709 else
7710 element2 = element;
7712 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7715 /* Get the value into the lower byte of the Altivec register where VSLO
7716 expects it. */
7717 if (TARGET_P9_VECTOR)
7718 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7719 else if (can_create_pseudo_p ())
7720 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7721 else
7723 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7724 emit_move_insn (tmp_di, tmp_gpr);
7725 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7728 /* Do the VSLO to get the value into the final location. */
7729 switch (mode)
7731 case E_V2DFmode:
7732 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7733 return;
7735 case E_V2DImode:
7736 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7737 return;
7739 case E_V4SFmode:
7741 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7742 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7743 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7744 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7745 tmp_altivec));
7747 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7748 return;
7751 case E_V4SImode:
7752 case E_V8HImode:
7753 case E_V16QImode:
7755 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7756 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7757 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7758 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7759 tmp_altivec));
7760 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7761 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7762 GEN_INT (64 - (8 * scalar_size))));
7763 return;
7766 default:
7767 gcc_unreachable ();
7770 return;
7772 else
7773 gcc_unreachable ();
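/* Editorial sketch, not part of rs6000.c: the little-endian element
   renumbering used above, on plain integers.  For two-element vectors
   a single XOR with 1 flips the index; for wider vectors the index is
   masked and subtracted from NUNITS - 1 (the AND/SUB pair above).  */

static unsigned
le_adjust_element (unsigned element, unsigned nunits)
{
  if (nunits == 2)
    return element ^ 1;			/* V2DF/V2DI case */
  return (nunits - 1) - (element & (nunits - 1));
}

/* e.g. le_adjust_element (0, 4) == 3 and le_adjust_element (3, 4) == 0.  */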
7776 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7777 two SImode values. */
7779 static void
7780 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7782 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7784 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7786 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7787 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7789 emit_move_insn (dest, GEN_INT (const1 | const2));
7790 return;
7793 /* Put si1 into upper 32-bits of dest. */
7794 if (CONST_INT_P (si1))
7795 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7796 else
7798 /* Generate RLDIC. */
7799 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7800 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7801 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7802 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7803 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7804 emit_insn (gen_rtx_SET (dest, and_rtx));
7807 /* Put si2 into the temporary. */
7808 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7809 if (CONST_INT_P (si2))
7810 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7811 else
7812 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7814 /* Combine the two parts. */
7815 emit_insn (gen_iordi3 (dest, dest, tmp));
7816 return;
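/* Editorial sketch, not part of rs6000.c: what the helper above
   computes, on plain integers.  SI1 lands in the upper 32 bits and SI2
   in the lower 32 bits of the doubleword, matching the shift/mask, the
   zero-extension, and the final OR.  */

#include <stdint.h>

static uint64_t
combine_si_pair (uint32_t si1, uint32_t si2)
{
  return ((uint64_t) si1 << 32) | si2;
}

/* e.g. combine_si_pair (0x1, 0x2) == 0x0000000100000002.  */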
7819 /* Split a V4SI initialization. */
7821 void
7822 rs6000_split_v4si_init (rtx operands[])
7824 rtx dest = operands[0];
7826 /* Destination is a GPR, build up the two DImode parts in place. */
7827 if (REG_P (dest) || SUBREG_P (dest))
7829 int d_regno = regno_or_subregno (dest);
7830 rtx scalar1 = operands[1];
7831 rtx scalar2 = operands[2];
7832 rtx scalar3 = operands[3];
7833 rtx scalar4 = operands[4];
7834 rtx tmp1 = operands[5];
7835 rtx tmp2 = operands[6];
7837 /* Even though we only need one temporary (plus the destination, which
7838 has an early clobber constraint), try to use two temporaries, one for
7839 each double word created.  That way the 2nd insn scheduling pass can
7840 rearrange things so the two parts are done in parallel. */
7841 if (BYTES_BIG_ENDIAN)
7843 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7844 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7845 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7846 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7848 else
7850 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7851 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7852 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7853 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7854 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7856 return;
7859 else
7860 gcc_unreachable ();
7863 /* Return the alignment of TYPE.  Existing alignment is ALIGN.  HOW
7864 selects whether the alignment is ABI-mandated, optional, or
7865 both ABI-mandated and optional alignment. */
7867 unsigned int
7868 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7870 if (how != align_opt)
7872 if (TREE_CODE (type) == VECTOR_TYPE)
7874 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7876 if (align < 64)
7877 align = 64;
7879 else if (align < 128)
7880 align = 128;
7884 if (how != align_abi)
7886 if (TREE_CODE (type) == ARRAY_TYPE
7887 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7889 if (align < BITS_PER_WORD)
7890 align = BITS_PER_WORD;
7894 return align;
7897 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7898 instructions simply ignore the low bits; VSX memory instructions
7899 are aligned to 4 or 8 bytes. */
7901 static bool
7902 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7904 return (STRICT_ALIGNMENT
7905 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7906 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7907 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7908 && (int) align < VECTOR_ALIGN (mode)))));
7911 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7913 bool
7914 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7916 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7918 if (computed != 128)
7920 static bool warned;
7921 if (!warned && warn_psabi)
7923 warned = true;
7924 inform (input_location,
7925 "the layout of aggregates containing vectors with"
7926 " %d-byte alignment has changed in GCC 5",
7927 computed / BITS_PER_UNIT);
7930 /* In current GCC there is no special case. */
7931 return false;
7934 return false;
7937 /* AIX increases natural record alignment to doubleword if the first
7938 field is an FP double while the FP fields remain word aligned. */
7940 unsigned int
7941 rs6000_special_round_type_align (tree type, unsigned int computed,
7942 unsigned int specified)
7944 unsigned int align = MAX (computed, specified);
7945 tree field = TYPE_FIELDS (type);
7947 /* Skip all non field decls */
7948 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7949 field = DECL_CHAIN (field);
7951 if (field != NULL && field != type)
7953 type = TREE_TYPE (field);
7954 while (TREE_CODE (type) == ARRAY_TYPE)
7955 type = TREE_TYPE (type);
7957 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7958 align = MAX (align, 64);
7961 return align;
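/* Editorial sketch, not part of rs6000.c: the AIX rule above,
   illustrated.  Only a leading double raises the record alignment to a
   doubleword; a double in a later position stays word-aligned and the
   record alignment is unchanged.  */

struct lead_double { double d; int i; };   /* alignment raised to 64 bits */
struct trail_double { int i; double d; };  /* alignment remains 32 bits */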
7964 /* Darwin increases record alignment to the natural alignment of
7965 the first field. */
7967 unsigned int
7968 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7969 unsigned int specified)
7971 unsigned int align = MAX (computed, specified);
7973 if (TYPE_PACKED (type))
7974 return align;
7976 /* Find the first field, looking down into aggregates. */
7977 do {
7978 tree field = TYPE_FIELDS (type);
7979 /* Skip all non field decls */
7980 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7981 field = DECL_CHAIN (field);
7982 if (! field)
7983 break;
7984 /* A packed field does not contribute any extra alignment. */
7985 if (DECL_PACKED (field))
7986 return align;
7987 type = TREE_TYPE (field);
7988 while (TREE_CODE (type) == ARRAY_TYPE)
7989 type = TREE_TYPE (type);
7990 } while (AGGREGATE_TYPE_P (type));
7992 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7993 align = MAX (align, TYPE_ALIGN (type));
7995 return align;
7998 /* Return 1 for an operand in small memory on V.4/eabi. */
8000 int
8001 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8002 machine_mode mode ATTRIBUTE_UNUSED)
8004 #if TARGET_ELF
8005 rtx sym_ref;
8007 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8008 return 0;
8010 if (DEFAULT_ABI != ABI_V4)
8011 return 0;
8013 if (GET_CODE (op) == SYMBOL_REF)
8014 sym_ref = op;
8016 else if (GET_CODE (op) != CONST
8017 || GET_CODE (XEXP (op, 0)) != PLUS
8018 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8019 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8020 return 0;
8022 else
8024 rtx sum = XEXP (op, 0);
8025 HOST_WIDE_INT summand;
8027 /* We have to be careful here, because it is the referenced address
8028 that must be 32k from _SDA_BASE_, not just the symbol. */
8029 summand = INTVAL (XEXP (sum, 1));
8030 if (summand < 0 || summand > g_switch_value)
8031 return 0;
8033 sym_ref = XEXP (sum, 0);
8036 return SYMBOL_REF_SMALL_P (sym_ref);
8037 #else
8038 return 0;
8039 #endif
8042 /* Return true if either operand is a general purpose register. */
8044 bool
8045 gpr_or_gpr_p (rtx op0, rtx op1)
8047 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8048 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8051 /* Return true if this is a move direct operation between GPR registers and
8052 floating point/VSX registers. */
8054 bool
8055 direct_move_p (rtx op0, rtx op1)
8057 int regno0, regno1;
8059 if (!REG_P (op0) || !REG_P (op1))
8060 return false;
8062 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8063 return false;
8065 regno0 = REGNO (op0);
8066 regno1 = REGNO (op1);
8067 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8068 return false;
8070 if (INT_REGNO_P (regno0))
8071 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8073 else if (INT_REGNO_P (regno1))
8075 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8076 return true;
8078 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8079 return true;
8082 return false;
8085 /* Return true if the OFFSET is valid for the quad address instructions that
8086 use d-form (register + offset) addressing. */
8088 static inline bool
8089 quad_address_offset_p (HOST_WIDE_INT offset)
8091 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
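/* Editorial sketch, not part of rs6000.c: the same check on plain
   integers, with sample values.  The DQ-form displacement is a signed
   16-bit field whose low four bits must be zero, so the valid offsets
   are the multiples of 16 in [-32768, 32752].  */

#include <stdbool.h>

static bool
dq_offset_ok (long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}

/* dq_offset_ok (48) is true; dq_offset_ok (50) is false (misaligned);
   dq_offset_ok (32768) is false (out of range).  */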
8094 /* Return true if ADDR is an acceptable address for a quad memory
8095 operation of mode MODE (either LQ/STQ for general purpose registers, or
8096 LXV/STXV for vector registers under ISA 3.0).  STRICT selects strict
8097 RTL checking; when true, only hard registers are acceptable as base
8098 registers. */
8100 bool
8101 quad_address_p (rtx addr, machine_mode mode, bool strict)
8103 rtx op0, op1;
8105 if (GET_MODE_SIZE (mode) != 16)
8106 return false;
8108 if (legitimate_indirect_address_p (addr, strict))
8109 return true;
8111 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8112 return false;
8114 if (GET_CODE (addr) != PLUS)
8115 return false;
8117 op0 = XEXP (addr, 0);
8118 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8119 return false;
8121 op1 = XEXP (addr, 1);
8122 if (!CONST_INT_P (op1))
8123 return false;
8125 return quad_address_offset_p (INTVAL (op1));
8128 /* Return true if this is a load or store quad operation. This function does
8129 not handle the atomic quad memory instructions. */
8131 bool
8132 quad_load_store_p (rtx op0, rtx op1)
8134 bool ret;
8136 if (!TARGET_QUAD_MEMORY)
8137 ret = false;
8139 else if (REG_P (op0) && MEM_P (op1))
8140 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8141 && quad_memory_operand (op1, GET_MODE (op1))
8142 && !reg_overlap_mentioned_p (op0, op1));
8144 else if (MEM_P (op0) && REG_P (op1))
8145 ret = (quad_memory_operand (op0, GET_MODE (op0))
8146 && quad_int_reg_operand (op1, GET_MODE (op1)));
8148 else
8149 ret = false;
8151 if (TARGET_DEBUG_ADDR)
8153 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8154 ret ? "true" : "false");
8155 debug_rtx (gen_rtx_SET (op0, op1));
8158 return ret;
8161 /* Given an address, return a constant offset term if one exists. */
8163 static rtx
8164 address_offset (rtx op)
8166 if (GET_CODE (op) == PRE_INC
8167 || GET_CODE (op) == PRE_DEC)
8168 op = XEXP (op, 0);
8169 else if (GET_CODE (op) == PRE_MODIFY
8170 || GET_CODE (op) == LO_SUM)
8171 op = XEXP (op, 1);
8173 if (GET_CODE (op) == CONST)
8174 op = XEXP (op, 0);
8176 if (GET_CODE (op) == PLUS)
8177 op = XEXP (op, 1);
8179 if (CONST_INT_P (op))
8180 return op;
8182 return NULL_RTX;
8185 /* Return true if the MEM operand is a memory operand suitable for use
8186 with a (full width, possibly multiple) gpr load/store. On
8187 powerpc64 this means the offset must be divisible by 4.
8188 Implements 'Y' constraint.
8190 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8191 a constraint function we know the operand has satisfied a suitable
8192 memory predicate. Also accept some odd rtl generated by reload
8193 (see rs6000_legitimize_reload_address for various forms). It is
8194 important that reload rtl be accepted by appropriate constraints
8195 but not by the operand predicate.
8197 Offsetting a lo_sum should not be allowed, except where we know by
8198 alignment that a 32k boundary is not crossed, but see the ???
8199 comment in rs6000_legitimize_reload_address. Note that by
8200 "offsetting" here we mean a further offset to access parts of the
8201 MEM. It's fine to have a lo_sum where the inner address is offset
8202 from a sym, since the same sym+offset will appear in the high part
8203 of the address calculation. */
8205 bool
8206 mem_operand_gpr (rtx op, machine_mode mode)
8208 unsigned HOST_WIDE_INT offset;
8209 int extra;
8210 rtx addr = XEXP (op, 0);
8212 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
8213 if (!rs6000_offsettable_memref_p (op, mode, false))
8214 return false;
8216 op = address_offset (addr);
8217 if (op == NULL_RTX)
8218 return true;
8220 offset = INTVAL (op);
8221 if (TARGET_POWERPC64 && (offset & 3) != 0)
8222 return false;
8224 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8225 if (extra < 0)
8226 extra = 0;
8228 if (GET_CODE (addr) == LO_SUM)
8229 /* For lo_sum addresses, we must allow any offset except one that
8230 causes a wrap, so test only the low 16 bits. */
8231 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8233 return offset + 0x8000 < 0x10000u - extra;
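/* Editorial sketch, not part of rs6000.c: the low-16-bit sign
   extension applied to LO_SUM offsets above, on plain integers.  XOR
   with 0x8000 followed by subtracting 0x8000 yields the value the
   16-bit D field will actually contribute at execution time.  */

static long
sext16 (unsigned long offset)
{
  long low = (long) (offset & 0xffff);	/* 0 .. 0xffff, exact */
  return (low ^ 0x8000) - 0x8000;
}

/* e.g. sext16 (0x7fff) == 32767, sext16 (0x8000) == -32768, and
   sext16 (0x18000) == -32768 (only the low 16 bits matter).  */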
8236 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8237 enforce an offset divisible by 4 even for 32-bit. */
8239 bool
8240 mem_operand_ds_form (rtx op, machine_mode mode)
8242 unsigned HOST_WIDE_INT offset;
8243 int extra;
8244 rtx addr = XEXP (op, 0);
8246 if (!offsettable_address_p (false, mode, addr))
8247 return false;
8249 op = address_offset (addr);
8250 if (op == NULL_RTX)
8251 return true;
8253 offset = INTVAL (op);
8254 if ((offset & 3) != 0)
8255 return false;
8257 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8258 if (extra < 0)
8259 extra = 0;
8261 if (GET_CODE (addr) == LO_SUM)
8262 /* For lo_sum addresses, we must allow any offset except one that
8263 causes a wrap, so test only the low 16 bits. */
8264 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8266 return offset + 0x8000 < 0x10000u - extra;
8269 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8271 static bool
8272 reg_offset_addressing_ok_p (machine_mode mode)
8274 switch (mode)
8276 case E_V16QImode:
8277 case E_V8HImode:
8278 case E_V4SFmode:
8279 case E_V4SImode:
8280 case E_V2DFmode:
8281 case E_V2DImode:
8282 case E_V1TImode:
8283 case E_TImode:
8284 case E_TFmode:
8285 case E_KFmode:
8286 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8287 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8288 a vector mode, if we want to use the VSX registers to move it around,
8289 we need to restrict ourselves to reg+reg addressing. Similarly for
8290 IEEE 128-bit floating point that is passed in a single vector
8291 register. */
8292 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8293 return mode_supports_vsx_dform_quad (mode);
8294 break;
8296 case E_V2SImode:
8297 case E_V2SFmode:
8298 /* Paired vector modes. Only reg+reg addressing is valid. */
8299 if (TARGET_PAIRED_FLOAT)
8300 return false;
8301 break;
8303 case E_SDmode:
8304 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8305 addressing for the LFIWZX and STFIWX instructions. */
8306 if (TARGET_NO_SDMODE_STACK)
8307 return false;
8308 break;
8310 default:
8311 break;
8314 return true;
8317 static bool
8318 virtual_stack_registers_memory_p (rtx op)
8320 int regnum;
8322 if (GET_CODE (op) == REG)
8323 regnum = REGNO (op);
8325 else if (GET_CODE (op) == PLUS
8326 && GET_CODE (XEXP (op, 0)) == REG
8327 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8328 regnum = REGNO (XEXP (op, 0));
8330 else
8331 return false;
8333 return (regnum >= FIRST_VIRTUAL_REGISTER
8334 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8337 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8338 is known to not straddle a 32k boundary. This function is used
8339 to determine whether -mcmodel=medium code can use TOC pointer
8340 relative addressing for OP. This means the alignment of the TOC
8341 pointer must also be taken into account, and unfortunately that is
8342 only 8 bytes. */
8344 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8345 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8346 #endif
8348 static bool
8349 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8350 machine_mode mode)
8352 tree decl;
8353 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8355 if (GET_CODE (op) != SYMBOL_REF)
8356 return false;
8358 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8359 SYMBOL_REF. */
8360 if (mode_supports_vsx_dform_quad (mode))
8361 return false;
8363 dsize = GET_MODE_SIZE (mode);
8364 decl = SYMBOL_REF_DECL (op);
8365 if (!decl)
8367 if (dsize == 0)
8368 return false;
8370 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8371 replacing memory addresses with an anchor plus offset. We
8372 could find the decl by rummaging around in the block->objects
8373 VEC for the given offset but that seems like too much work. */
8374 dalign = BITS_PER_UNIT;
8375 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8376 && SYMBOL_REF_ANCHOR_P (op)
8377 && SYMBOL_REF_BLOCK (op) != NULL)
8379 struct object_block *block = SYMBOL_REF_BLOCK (op);
8381 dalign = block->alignment;
8382 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8384 else if (CONSTANT_POOL_ADDRESS_P (op))
8386 /* It would be nice to have get_pool_align()... */
8387 machine_mode cmode = get_pool_mode (op);
8389 dalign = GET_MODE_ALIGNMENT (cmode);
8392 else if (DECL_P (decl))
8394 dalign = DECL_ALIGN (decl);
8396 if (dsize == 0)
8398 /* Allow BLKmode when the entire object is known to not
8399 cross a 32k boundary. */
8400 if (!DECL_SIZE_UNIT (decl))
8401 return false;
8403 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8404 return false;
8406 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8407 if (dsize > 32768)
8408 return false;
8410 dalign /= BITS_PER_UNIT;
8411 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8412 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8413 return dalign >= dsize;
8416 else
8417 gcc_unreachable ();
8419 /* Find how many bits of the alignment we know for this access. */
8420 dalign /= BITS_PER_UNIT;
8421 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8422 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8423 mask = dalign - 1;
8424 lsb = offset & -offset;
8425 mask &= lsb - 1;
8426 dalign = mask + 1;
8428 return dalign >= dsize;
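/* Editorial sketch, not part of rs6000.c: the closing alignment
   computation above, on plain integers.  The alignment that can be
   guaranteed for base + OFFSET is the base alignment capped by the
   lowest set bit of OFFSET.  */

static unsigned long
known_alignment (unsigned long dalign, unsigned long offset)
{
  unsigned long mask = dalign - 1;	  /* DALIGN is a power of two.  */
  unsigned long lsb = offset & -offset;	  /* lowest set bit of OFFSET.  */
  mask &= lsb - 1;
  return mask + 1;
}

/* e.g. known_alignment (8, 4) == 4, known_alignment (8, 16) == 8,
   known_alignment (8, 6) == 2, known_alignment (8, 0) == 8.  */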
8431 static bool
8432 constant_pool_expr_p (rtx op)
8434 rtx base, offset;
8436 split_const (op, &base, &offset);
8437 return (GET_CODE (base) == SYMBOL_REF
8438 && CONSTANT_POOL_ADDRESS_P (base)
8439 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8442 /* These are only used to pass through from print_operand/print_operand_address
8443 to rs6000_output_addr_const_extra over the intervening function
8444 output_addr_const which is not target code. */
8445 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8447 /* Return true if OP is a toc pointer relative address (the output
8448 of create_TOC_reference). If STRICT, do not match non-split
8449 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8450 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8451 TOCREL_OFFSET_RET respectively. */
8453 bool
8454 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8455 const_rtx *tocrel_offset_ret)
8457 if (!TARGET_TOC)
8458 return false;
8460 if (TARGET_CMODEL != CMODEL_SMALL)
8462 /* When strict ensure we have everything tidy. */
8463 if (strict
8464 && !(GET_CODE (op) == LO_SUM
8465 && REG_P (XEXP (op, 0))
8466 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8467 return false;
8469 /* When not strict, allow non-split TOC addresses and also allow
8470 (lo_sum (high ..)) TOC addresses created during reload. */
8471 if (GET_CODE (op) == LO_SUM)
8472 op = XEXP (op, 1);
8475 const_rtx tocrel_base = op;
8476 const_rtx tocrel_offset = const0_rtx;
8478 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8480 tocrel_base = XEXP (op, 0);
8481 tocrel_offset = XEXP (op, 1);
8484 if (tocrel_base_ret)
8485 *tocrel_base_ret = tocrel_base;
8486 if (tocrel_offset_ret)
8487 *tocrel_offset_ret = tocrel_offset;
8489 return (GET_CODE (tocrel_base) == UNSPEC
8490 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8493 /* Return true if X is a constant pool address, and also for cmodel=medium
8494 if X is a toc-relative address known to be offsettable within MODE. */
8496 bool
8497 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8498 bool strict)
8500 const_rtx tocrel_base, tocrel_offset;
8501 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8502 && (TARGET_CMODEL != CMODEL_MEDIUM
8503 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8504 || mode == QImode
8505 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8506 INTVAL (tocrel_offset), mode)));
8509 static bool
8510 legitimate_small_data_p (machine_mode mode, rtx x)
8512 return (DEFAULT_ABI == ABI_V4
8513 && !flag_pic && !TARGET_TOC
8514 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8515 && small_data_operand (x, mode));
8518 bool
8519 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8520 bool strict, bool worst_case)
8522 unsigned HOST_WIDE_INT offset;
8523 unsigned int extra;
8525 if (GET_CODE (x) != PLUS)
8526 return false;
8527 if (!REG_P (XEXP (x, 0)))
8528 return false;
8529 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8530 return false;
8531 if (mode_supports_vsx_dform_quad (mode))
8532 return quad_address_p (x, mode, strict);
8533 if (!reg_offset_addressing_ok_p (mode))
8534 return virtual_stack_registers_memory_p (x);
8535 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8536 return true;
8537 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8538 return false;
8540 offset = INTVAL (XEXP (x, 1));
8541 extra = 0;
8542 switch (mode)
8544 case E_V2SImode:
8545 case E_V2SFmode:
8546 /* Paired single modes: offset addressing isn't valid. */
8547 return false;
8549 case E_DFmode:
8550 case E_DDmode:
8551 case E_DImode:
8552 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8553 addressing. */
8554 if (VECTOR_MEM_VSX_P (mode))
8555 return false;
8557 if (!worst_case)
8558 break;
8559 if (!TARGET_POWERPC64)
8560 extra = 4;
8561 else if (offset & 3)
8562 return false;
8563 break;
8565 case E_TFmode:
8566 case E_IFmode:
8567 case E_KFmode:
8568 case E_TDmode:
8569 case E_TImode:
8570 case E_PTImode:
8571 extra = 8;
8572 if (!worst_case)
8573 break;
8574 if (!TARGET_POWERPC64)
8575 extra = 12;
8576 else if (offset & 3)
8577 return false;
8578 break;
8580 default:
8581 break;
8584 offset += 0x8000;
8585 return offset < 0x10000 - extra;
8588 bool
8589 legitimate_indexed_address_p (rtx x, int strict)
8591 rtx op0, op1;
8593 if (GET_CODE (x) != PLUS)
8594 return false;
8596 op0 = XEXP (x, 0);
8597 op1 = XEXP (x, 1);
8599 return (REG_P (op0) && REG_P (op1)
8600 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8601 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8602 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8603 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8606 bool
8607 avoiding_indexed_address_p (machine_mode mode)
8609 /* Avoid indexed addressing for modes that have non-indexed
8610 load/store instruction forms. */
8611 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8614 bool
8615 legitimate_indirect_address_p (rtx x, int strict)
8617 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8620 bool
8621 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8623 if (!TARGET_MACHO || !flag_pic
8624 || mode != SImode || GET_CODE (x) != MEM)
8625 return false;
8626 x = XEXP (x, 0);
8628 if (GET_CODE (x) != LO_SUM)
8629 return false;
8630 if (GET_CODE (XEXP (x, 0)) != REG)
8631 return false;
8632 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8633 return false;
8634 x = XEXP (x, 1);
8636 return CONSTANT_P (x);
8639 static bool
8640 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8642 if (GET_CODE (x) != LO_SUM)
8643 return false;
8644 if (GET_CODE (XEXP (x, 0)) != REG)
8645 return false;
8646 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8647 return false;
8648 /* quad word addresses are restricted, and we can't use LO_SUM. */
8649 if (mode_supports_vsx_dform_quad (mode))
8650 return false;
8651 x = XEXP (x, 1);
8653 if (TARGET_ELF || TARGET_MACHO)
8655 bool large_toc_ok;
8657 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8658 return false;
8659 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8660 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8661 recognizes some LO_SUM addresses as valid although this
8662 function says the opposite.  In most cases LRA can, through its
8663 various transformations, generate correct code for address reloads;
8664 it is only some LO_SUM cases that it cannot manage.  So we need
8665 code here, analogous to that in rs6000_legitimize_reload_address
8666 for LO_SUM, saying that some such addresses are still valid. */
8667 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8668 && small_toc_ref (x, VOIDmode));
8669 if (TARGET_TOC && ! large_toc_ok)
8670 return false;
8671 if (GET_MODE_NUNITS (mode) != 1)
8672 return false;
8673 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8674 && !(/* ??? Assume floating point reg based on mode? */
8675 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8676 && (mode == DFmode || mode == DDmode)))
8677 return false;
8679 return CONSTANT_P (x) || large_toc_ok;
8682 return false;
8686 /* Try machine-dependent ways of modifying an illegitimate address
8687 to be legitimate. If we find one, return the new, valid address.
8688 This is used from only one place: `memory_address' in explow.c.
8690 OLDX is the address as it was before break_out_memory_refs was
8691 called. In some cases it is useful to look at this to decide what
8692 needs to be done.
8694 It is always safe for this function to do nothing. It exists to
8695 recognize opportunities to optimize the output.
8697 On RS/6000, first check for the sum of a register with a constant
8698 integer that is out of range. If so, generate code to add the
8699 constant with the low-order 16 bits masked to the register and force
8700 this result into another register (this can be done with `cau').
8701 Then generate an address of REG+(CONST&0xffff), allowing for the
8702 possibility of bit 16 being a one.
8704 Then check for the sum of a register and something not constant, try to
8705 load the other things into a register and return the sum. */
8707 static rtx
8708 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8709 machine_mode mode)
8711 unsigned int extra;
8713 if (!reg_offset_addressing_ok_p (mode)
8714 || mode_supports_vsx_dform_quad (mode))
8716 if (virtual_stack_registers_memory_p (x))
8717 return x;
8719 /* In theory we should not be seeing addresses of the form reg+0,
8720 but just in case it is generated, optimize it away. */
8721 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8722 return force_reg (Pmode, XEXP (x, 0));
8724 /* For TImode with load/store quad, restrict addresses to just a single
8725 pointer, so it works with both GPRs and VSX registers. */
8726 /* Make sure both operands are registers. */
8727 else if (GET_CODE (x) == PLUS
8728 && (mode != TImode || !TARGET_VSX))
8729 return gen_rtx_PLUS (Pmode,
8730 force_reg (Pmode, XEXP (x, 0)),
8731 force_reg (Pmode, XEXP (x, 1)));
8732 else
8733 return force_reg (Pmode, x);
8735 if (GET_CODE (x) == SYMBOL_REF)
8737 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8738 if (model != 0)
8739 return rs6000_legitimize_tls_address (x, model);
8742 extra = 0;
8743 switch (mode)
8745 case E_TFmode:
8746 case E_TDmode:
8747 case E_TImode:
8748 case E_PTImode:
8749 case E_IFmode:
8750 case E_KFmode:
8751 /* As in legitimate_offset_address_p we do not assume
8752 worst-case. The mode here is just a hint as to the registers
8753 used. A TImode is usually in gprs, but may actually be in
8754 fprs. Leave worst-case scenario for reload to handle via
8755 insn constraints. PTImode is only GPRs. */
8756 extra = 8;
8757 break;
8758 default:
8759 break;
8762 if (GET_CODE (x) == PLUS
8763 && GET_CODE (XEXP (x, 0)) == REG
8764 && GET_CODE (XEXP (x, 1)) == CONST_INT
8765 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8766 >= 0x10000 - extra)
8767 && !PAIRED_VECTOR_MODE (mode))
8769 HOST_WIDE_INT high_int, low_int;
8770 rtx sum;
8771 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8772 if (low_int >= 0x8000 - extra)
8773 low_int = 0;
8774 high_int = INTVAL (XEXP (x, 1)) - low_int;
8775 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8776 GEN_INT (high_int)), 0);
8777 return plus_constant (Pmode, sum, low_int);
8779 else if (GET_CODE (x) == PLUS
8780 && GET_CODE (XEXP (x, 0)) == REG
8781 && GET_CODE (XEXP (x, 1)) != CONST_INT
8782 && GET_MODE_NUNITS (mode) == 1
8783 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8784 || (/* ??? Assume floating point reg based on mode? */
8785 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8786 && (mode == DFmode || mode == DDmode)))
8787 && !avoiding_indexed_address_p (mode))
8789 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8790 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8792 else if (PAIRED_VECTOR_MODE (mode))
8794 if (mode == DImode)
8795 return x;
8796 /* We accept [reg + reg]. */
8798 if (GET_CODE (x) == PLUS)
8800 rtx op1 = XEXP (x, 0);
8801 rtx op2 = XEXP (x, 1);
8802 rtx y;
8804 op1 = force_reg (Pmode, op1);
8805 op2 = force_reg (Pmode, op2);
8807 /* We can't always do [reg + reg] for these, because [reg +
8808 reg + offset] is not a legitimate addressing mode. */
8809 y = gen_rtx_PLUS (Pmode, op1, op2);
8811 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8812 return force_reg (Pmode, y);
8813 else
8814 return y;
8817 return force_reg (Pmode, x);
8819 else if ((TARGET_ELF
8820 #if TARGET_MACHO
8821 || !MACHO_DYNAMIC_NO_PIC_P
8822 #endif
8824 && TARGET_32BIT
8825 && TARGET_NO_TOC
8826 && ! flag_pic
8827 && GET_CODE (x) != CONST_INT
8828 && GET_CODE (x) != CONST_WIDE_INT
8829 && GET_CODE (x) != CONST_DOUBLE
8830 && CONSTANT_P (x)
8831 && GET_MODE_NUNITS (mode) == 1
8832 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8833 || (/* ??? Assume floating point reg based on mode? */
8834 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8835 && (mode == DFmode || mode == DDmode))))
8837 rtx reg = gen_reg_rtx (Pmode);
8838 if (TARGET_ELF)
8839 emit_insn (gen_elf_high (reg, x));
8840 else
8841 emit_insn (gen_macho_high (reg, x));
8842 return gen_rtx_LO_SUM (Pmode, reg, x);
8844 else if (TARGET_TOC
8845 && GET_CODE (x) == SYMBOL_REF
8846 && constant_pool_expr_p (x)
8847 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8848 return create_TOC_reference (x, NULL_RTX);
8849 else
8850 return x;
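/* Editorial sketch, not part of rs6000.c: the basic high/low split of
   an out-of-range constant offset used above (before the extra
   adjustment for large modes), on plain integers.  LOW is the
   sign-extended low halfword and HIGH is what the addis-style add must
   contribute so that HIGH + LOW rebuilds the original value.  */

static void
split_offset (long val, long *high, long *low)
{
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  *high = val - *low;		/* always a multiple of 0x10000 */
}

/* e.g. val 0x12345678 gives high 0x12340000, low 0x5678;
   val 0x1234abcd gives high 0x12350000, low -0x5433.  */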
8853 /* Debug version of rs6000_legitimize_address. */
8854 static rtx
8855 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8857 rtx ret;
8858 rtx_insn *insns;
8860 start_sequence ();
8861 ret = rs6000_legitimize_address (x, oldx, mode);
8862 insns = get_insns ();
8863 end_sequence ();
8865 if (ret != x)
8867 fprintf (stderr,
8868 "\nrs6000_legitimize_address: mode %s, old code %s, "
8869 "new code %s, modified\n",
8870 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8871 GET_RTX_NAME (GET_CODE (ret)));
8873 fprintf (stderr, "Original address:\n");
8874 debug_rtx (x);
8876 fprintf (stderr, "oldx:\n");
8877 debug_rtx (oldx);
8879 fprintf (stderr, "New address:\n");
8880 debug_rtx (ret);
8882 if (insns)
8884 fprintf (stderr, "Insns added:\n");
8885 debug_rtx_list (insns, 20);
8888 else
8890 fprintf (stderr,
8891 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8892 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8894 debug_rtx (x);
8897 if (insns)
8898 emit_insn (insns);
8900 return ret;
8903 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8904 We need to emit DTP-relative relocations. */
8906 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8907 static void
8908 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8910 switch (size)
8912 case 4:
8913 fputs ("\t.long\t", file);
8914 break;
8915 case 8:
8916 fputs (DOUBLE_INT_ASM_OP, file);
8917 break;
8918 default:
8919 gcc_unreachable ();
8921 output_addr_const (file, x);
8922 if (TARGET_ELF)
8923 fputs ("@dtprel+0x8000", file);
8924 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8926 switch (SYMBOL_REF_TLS_MODEL (x))
8928 case 0:
8929 break;
8930 case TLS_MODEL_LOCAL_EXEC:
8931 fputs ("@le", file);
8932 break;
8933 case TLS_MODEL_INITIAL_EXEC:
8934 fputs ("@ie", file);
8935 break;
8936 case TLS_MODEL_GLOBAL_DYNAMIC:
8937 case TLS_MODEL_LOCAL_DYNAMIC:
8938 fputs ("@m", file);
8939 break;
8940 default:
8941 gcc_unreachable ();
8946 /* Return true if X is a symbol that refers to real (rather than emulated)
8947 TLS. */
8949 static bool
8950 rs6000_real_tls_symbol_ref_p (rtx x)
8952 return (GET_CODE (x) == SYMBOL_REF
8953 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8956 /* In the name of slightly smaller debug output, and to cater to
8957 general assembler lossage, recognize various UNSPEC sequences
8958 and turn them back into a direct symbol reference. */
8960 static rtx
8961 rs6000_delegitimize_address (rtx orig_x)
8963 rtx x, y, offset;
8965 orig_x = delegitimize_mem_from_attrs (orig_x);
8966 x = orig_x;
8967 if (MEM_P (x))
8968 x = XEXP (x, 0);
8970 y = x;
8971 if (TARGET_CMODEL != CMODEL_SMALL
8972 && GET_CODE (y) == LO_SUM)
8973 y = XEXP (y, 1);
8975 offset = NULL_RTX;
8976 if (GET_CODE (y) == PLUS
8977 && GET_MODE (y) == Pmode
8978 && CONST_INT_P (XEXP (y, 1)))
8980 offset = XEXP (y, 1);
8981 y = XEXP (y, 0);
8984 if (GET_CODE (y) == UNSPEC
8985 && XINT (y, 1) == UNSPEC_TOCREL)
8987 y = XVECEXP (y, 0, 0);
8989 #ifdef HAVE_AS_TLS
8990 /* Do not associate thread-local symbols with the original
8991 constant pool symbol. */
8992 if (TARGET_XCOFF
8993 && GET_CODE (y) == SYMBOL_REF
8994 && CONSTANT_POOL_ADDRESS_P (y)
8995 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8996 return orig_x;
8997 #endif
8999 if (offset != NULL_RTX)
9000 y = gen_rtx_PLUS (Pmode, y, offset);
9001 if (!MEM_P (orig_x))
9002 return y;
9003 else
9004 return replace_equiv_address_nv (orig_x, y);
9007 if (TARGET_MACHO
9008 && GET_CODE (orig_x) == LO_SUM
9009 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9011 y = XEXP (XEXP (orig_x, 1), 0);
9012 if (GET_CODE (y) == UNSPEC
9013 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9014 return XVECEXP (y, 0, 0);
9017 return orig_x;
9020 /* Return true if X shouldn't be emitted into the debug info.
9021 The linker doesn't like .toc section references from
9022 .debug_* sections, so reject .toc section symbols. */
9024 static bool
9025 rs6000_const_not_ok_for_debug_p (rtx x)
9027 if (GET_CODE (x) == UNSPEC)
9028 return true;
9029 if (GET_CODE (x) == SYMBOL_REF
9030 && CONSTANT_POOL_ADDRESS_P (x))
9032 rtx c = get_pool_constant (x);
9033 machine_mode cmode = get_pool_mode (x);
9034 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9035 return true;
9038 return false;
9042 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9044 static bool
9045 rs6000_legitimate_combined_insn (rtx_insn *insn)
9047 int icode = INSN_CODE (insn);
9049 /* Reject creating doloop insns. Combine should not be allowed
9050 to create these for a number of reasons:
9051 1) In a nested loop, if combine creates one of these in an
9052 outer loop and the register allocator happens to allocate ctr
9053 to the outer loop insn, then the inner loop can't use ctr.
9054 Inner loops ought to be more highly optimized.
9055 2) Combine often wants to create one of these from what was
9056 originally a three insn sequence, first combining the three
9057 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9058 allocated ctr, the splitter takes us back to the three insn
9059 sequence. It's better to stop combine at the two insn
9060 sequence.
9061 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9062 insns, the register allocator sometimes uses floating point
9063 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9064 jump insn and output reloads are not implemented for jumps,
9065 the ctrsi/ctrdi splitters need to handle all possible cases.
9066 That's a pain, and it gets to be seriously difficult when a
9067 splitter that runs after reload needs memory to transfer from
9068 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9069 for the difficult case. It's better to not create problems
9070 in the first place. */
9071 if (icode != CODE_FOR_nothing
9072 && (icode == CODE_FOR_bdz_si
9073 || icode == CODE_FOR_bdz_di
9074 || icode == CODE_FOR_bdnz_si
9075 || icode == CODE_FOR_bdnz_di
9076 || icode == CODE_FOR_bdztf_si
9077 || icode == CODE_FOR_bdztf_di
9078 || icode == CODE_FOR_bdnztf_si
9079 || icode == CODE_FOR_bdnztf_di))
9080 return false;
9082 return true;
9085 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9087 static GTY(()) rtx rs6000_tls_symbol;
9088 static rtx
9089 rs6000_tls_get_addr (void)
9091 if (!rs6000_tls_symbol)
9092 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9094 return rs6000_tls_symbol;
9097 /* Construct the SYMBOL_REF for TLS GOT references. */
9099 static GTY(()) rtx rs6000_got_symbol;
9100 static rtx
9101 rs6000_got_sym (void)
9103 if (!rs6000_got_symbol)
9105 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9106 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9107 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9110 return rs6000_got_symbol;
9113 /* AIX Thread-Local Address support. */
9115 static rtx
9116 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9118 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9119 const char *name;
9120 char *tlsname;
9122 name = XSTR (addr, 0);
9123 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9124 or the symbol will be placed in the TLS private data section. */
9125 if (name[strlen (name) - 1] != ']'
9126 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9127 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9129 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9130 strcpy (tlsname, name);
9131 strcat (tlsname,
9132 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9133 tlsaddr = copy_rtx (addr);
9134 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9136 else
9137 tlsaddr = addr;
9139 /* Place addr into TOC constant pool. */
9140 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9142 /* Output the TOC entry and create the MEM referencing the value. */
9143 if (constant_pool_expr_p (XEXP (sym, 0))
9144 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9146 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9147 mem = gen_const_mem (Pmode, tocref);
9148 set_mem_alias_set (mem, get_TOC_alias_set ());
9150 else
9151 return sym;
9153 /* Use global-dynamic for local-dynamic. */
9154 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9155 || model == TLS_MODEL_LOCAL_DYNAMIC)
9157 /* Create new TOC reference for @m symbol. */
9158 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9159 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9160 strcpy (tlsname, "*LCM");
9161 strcat (tlsname, name + 3);
9162 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9163 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9164 tocref = create_TOC_reference (modaddr, NULL_RTX);
9165 rtx modmem = gen_const_mem (Pmode, tocref);
9166 set_mem_alias_set (modmem, get_TOC_alias_set ());
9168 rtx modreg = gen_reg_rtx (Pmode);
9169 emit_insn (gen_rtx_SET (modreg, modmem));
9171 tmpreg = gen_reg_rtx (Pmode);
9172 emit_insn (gen_rtx_SET (tmpreg, mem));
9174 dest = gen_reg_rtx (Pmode);
9175 if (TARGET_32BIT)
9176 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9177 else
9178 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9179 return dest;
9181 /* Obtain the TLS pointer: a call on 32-bit, GPR 13 on 64-bit. */
9182 else if (TARGET_32BIT)
9184 tlsreg = gen_reg_rtx (SImode);
9185 emit_insn (gen_tls_get_tpointer (tlsreg));
9187 else
9188 tlsreg = gen_rtx_REG (DImode, 13);
9190 /* Load the TOC value into temporary register. */
9191 tmpreg = gen_reg_rtx (Pmode);
9192 emit_insn (gen_rtx_SET (tmpreg, mem));
9193 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9194 gen_rtx_MINUS (Pmode, addr, tlsreg));
9196 /* Add TOC symbol value to TLS pointer. */
9197 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9199 return dest;
9202 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9203 this (thread-local) address. */
9205 static rtx
9206 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9208 rtx dest, insn;
9210 if (TARGET_XCOFF)
9211 return rs6000_legitimize_tls_address_aix (addr, model);
9213 dest = gen_reg_rtx (Pmode);
9214 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9216 rtx tlsreg;
9218 if (TARGET_64BIT)
9220 tlsreg = gen_rtx_REG (Pmode, 13);
9221 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9223 else
9225 tlsreg = gen_rtx_REG (Pmode, 2);
9226 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9228 emit_insn (insn);
9230 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9232 rtx tlsreg, tmp;
9234 tmp = gen_reg_rtx (Pmode);
9235 if (TARGET_64BIT)
9237 tlsreg = gen_rtx_REG (Pmode, 13);
9238 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9240 else
9242 tlsreg = gen_rtx_REG (Pmode, 2);
9243 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9245 emit_insn (insn);
9246 if (TARGET_64BIT)
9247 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9248 else
9249 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9250 emit_insn (insn);
9252 else
9254 rtx r3, got, tga, tmp1, tmp2, call_insn;
9256 /* We currently use relocations like @got@tlsgd for tls, which
9257 means the linker will handle allocation of tls entries, placing
9258 them in the .got section. So use a pointer to the .got section,
9259 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9260 or to secondary GOT sections used by 32-bit -fPIC. */
9261 if (TARGET_64BIT)
9262 got = gen_rtx_REG (Pmode, 2);
9263 else
9265 if (flag_pic == 1)
9266 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9267 else
9269 rtx gsym = rs6000_got_sym ();
9270 got = gen_reg_rtx (Pmode);
9271 if (flag_pic == 0)
9272 rs6000_emit_move (got, gsym, Pmode);
9273 else
9275 rtx mem, lab;
9277 tmp1 = gen_reg_rtx (Pmode);
9278 tmp2 = gen_reg_rtx (Pmode);
9279 mem = gen_const_mem (Pmode, tmp1);
9280 lab = gen_label_rtx ();
9281 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9282 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9283 if (TARGET_LINK_STACK)
9284 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9285 emit_move_insn (tmp2, mem);
9286 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9287 set_unique_reg_note (last, REG_EQUAL, gsym);
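/* In outline, the sequence just emitted computes the GOT address
   pc-relatively: the bcl emitted by load_toc_v4_PIC_1b leaves the
   address of a local label in LR, a constant word emitted with that
   label holds the distance to _GLOBAL_OFFSET_TABLE_, and the final
   addsi3 combines the two.  (With TARGET_LINK_STACK the label address
   comes back biased by 4, hence the extra addsi3 fixup above.)  */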
9292 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9294 tga = rs6000_tls_get_addr ();
9295 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9296 const0_rtx, Pmode);
9298 r3 = gen_rtx_REG (Pmode, 3);
9299 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9301 if (TARGET_64BIT)
9302 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9303 else
9304 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9306 else if (DEFAULT_ABI == ABI_V4)
9307 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9308 else
9309 gcc_unreachable ();
9310 call_insn = last_call_insn ();
9311 PATTERN (call_insn) = insn;
9312 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9313 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9314 pic_offset_table_rtx);
9316 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9318 tga = rs6000_tls_get_addr ();
9319 tmp1 = gen_reg_rtx (Pmode);
9320 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9321 const0_rtx, Pmode);
9323 r3 = gen_rtx_REG (Pmode, 3);
9324 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9326 if (TARGET_64BIT)
9327 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9328 else
9329 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9331 else if (DEFAULT_ABI == ABI_V4)
9332 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9333 else
9334 gcc_unreachable ();
9335 call_insn = last_call_insn ();
9336 PATTERN (call_insn) = insn;
9337 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9338 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9339 pic_offset_table_rtx);
9341 if (rs6000_tls_size == 16)
9343 if (TARGET_64BIT)
9344 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9345 else
9346 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9348 else if (rs6000_tls_size == 32)
9350 tmp2 = gen_reg_rtx (Pmode);
9351 if (TARGET_64BIT)
9352 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9353 else
9354 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9355 emit_insn (insn);
9356 if (TARGET_64BIT)
9357 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9358 else
9359 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9361 else
9363 tmp2 = gen_reg_rtx (Pmode);
9364 if (TARGET_64BIT)
9365 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9366 else
9367 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9368 emit_insn (insn);
9369 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9371 emit_insn (insn);
9373 else
9375 /* IE, or 64-bit offset LE. */
9376 tmp2 = gen_reg_rtx (Pmode);
9377 if (TARGET_64BIT)
9378 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9379 else
9380 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9381 emit_insn (insn);
9382 if (TARGET_64BIT)
9383 insn = gen_tls_tls_64 (dest, tmp2, addr);
9384 else
9385 insn = gen_tls_tls_32 (dest, tmp2, addr);
9386 emit_insn (insn);
9390 return dest;
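/* For reference, the cases above correspond roughly to the usual
   PowerPC TLS code sequences (exact relocations depend on the ABI and
   -mtls-size; 64-bit forms shown, 32-bit uses r2 as thread pointer):

     local-exec, 16-bit:  addi  rD,r13,x@tprel
     local-exec, 32-bit:  addis rT,r13,x@tprel@ha
                          addi  rD,rT,x@tprel@l
     initial-exec:        ld    rT,x@got@tprel(r2)
                          add   rD,rT,x@tls(r13)
     global-dynamic:      addi  r3,r2,x@got@tlsgd
                          bl    __tls_get_addr(x@tlsgd)  */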
9393 /* Only create the global variable for the stack protect guard if we are using
9394 the global flavor of that guard. */
9395 static tree
9396 rs6000_init_stack_protect_guard (void)
9398 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9399 return default_stack_protect_guard ();
9401 return NULL_TREE;
9404 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9406 static bool
9407 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9409 if (GET_CODE (x) == HIGH
9410 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9411 return true;
9413 /* A TLS symbol in the TOC cannot contain a sum. */
9414 if (GET_CODE (x) == CONST
9415 && GET_CODE (XEXP (x, 0)) == PLUS
9416 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9417 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9418 return true;
9420 /* Do not place an ELF TLS symbol in the constant pool. */
9421 return TARGET_ELF && tls_referenced_p (x);
9424 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9425 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9426 can be addressed relative to the toc pointer. */
9428 static bool
9429 use_toc_relative_ref (rtx sym, machine_mode mode)
9431 return ((constant_pool_expr_p (sym)
9432 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9433 get_pool_mode (sym)))
9434 || (TARGET_CMODEL == CMODEL_MEDIUM
9435 && SYMBOL_REF_LOCAL_P (sym)
9436 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9439 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9440 replace the input X, or the original X if no replacement is called for.
9441 The output parameter *WIN is 1 if the calling macro should goto WIN,
9442 0 if it should not.
9444 For RS/6000, we wish to handle large displacements off a base
9445 register by splitting the addend across an addi/addis pair and the mem insn.
9446 This cuts the number of extra insns needed from 3 to 1.
9448 On Darwin, we use this to generate code for floating point constants.
9449 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9450 The Darwin code is inside #if TARGET_MACHO because only then are the
9451 machopic_* functions defined. */
9452 static rtx
9453 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9454 int opnum, int type,
9455 int ind_levels ATTRIBUTE_UNUSED, int *win)
9457 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9458 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9460 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9461 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9462 if (reg_offset_p
9463 && opnum == 1
9464 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9465 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9466 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9467 && TARGET_P9_VECTOR)
9468 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9469 && TARGET_P9_VECTOR)))
9470 reg_offset_p = false;
9472 /* We must recognize output that we have already generated ourselves. */
9473 if (GET_CODE (x) == PLUS
9474 && GET_CODE (XEXP (x, 0)) == PLUS
9475 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9476 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9477 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9479 if (TARGET_DEBUG_ADDR)
9481 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9482 debug_rtx (x);
9484 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9485 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9486 opnum, (enum reload_type) type);
9487 *win = 1;
9488 return x;
9491 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9492 if (GET_CODE (x) == LO_SUM
9493 && GET_CODE (XEXP (x, 0)) == HIGH)
9495 if (TARGET_DEBUG_ADDR)
9497 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9498 debug_rtx (x);
9500 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9501 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9502 opnum, (enum reload_type) type);
9503 *win = 1;
9504 return x;
9507 #if TARGET_MACHO
9508 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9509 && GET_CODE (x) == LO_SUM
9510 && GET_CODE (XEXP (x, 0)) == PLUS
9511 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9512 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9513 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9514 && machopic_operand_p (XEXP (x, 1)))
9516 /* Result of previous invocation of this function on Darwin
9517 floating point constant. */
9518 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9519 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9520 opnum, (enum reload_type) type);
9521 *win = 1;
9522 return x;
9524 #endif
9526 if (TARGET_CMODEL != CMODEL_SMALL
9527 && reg_offset_p
9528 && !quad_offset_p
9529 && small_toc_ref (x, VOIDmode))
9531 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9532 x = gen_rtx_LO_SUM (Pmode, hi, x);
9533 if (TARGET_DEBUG_ADDR)
9535 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9536 debug_rtx (x);
9538 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9539 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9540 opnum, (enum reload_type) type);
9541 *win = 1;
9542 return x;
9545 if (GET_CODE (x) == PLUS
9546 && REG_P (XEXP (x, 0))
9547 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9548 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9549 && CONST_INT_P (XEXP (x, 1))
9550 && reg_offset_p
9551 && !PAIRED_VECTOR_MODE (mode)
9552 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9554 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9555 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9556 HOST_WIDE_INT high
9557 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
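/* For example, val = 0x1a345 gives low = -0x5cbb (0xa345 sign-extended
   from 16 bits) and high = 0x20000, so high + low == val, high fits an
   addis immediate and low fits a 16-bit d-form displacement.  */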
9559 /* Check for 32-bit overflow or quad addresses with one of the
9560 four least significant bits set. */
9561 if (high + low != val
9562 || (quad_offset_p && (low & 0xf)))
9564 *win = 0;
9565 return x;
9568 /* Reload the high part into a base reg; leave the low part
9569 in the mem directly. */
9571 x = gen_rtx_PLUS (GET_MODE (x),
9572 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9573 GEN_INT (high)),
9574 GEN_INT (low));
9576 if (TARGET_DEBUG_ADDR)
9578 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9579 debug_rtx (x);
9581 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9582 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9583 opnum, (enum reload_type) type);
9584 *win = 1;
9585 return x;
9588 if (GET_CODE (x) == SYMBOL_REF
9589 && reg_offset_p
9590 && !quad_offset_p
9591 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9592 && !PAIRED_VECTOR_MODE (mode)
9593 #if TARGET_MACHO
9594 && DEFAULT_ABI == ABI_DARWIN
9595 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9596 && machopic_symbol_defined_p (x)
9597 #else
9598 && DEFAULT_ABI == ABI_V4
9599 && !flag_pic
9600 #endif
9601 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9602 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9603 without fprs.
9604 ??? Assume floating point reg based on mode? This assumption is
9605 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9606 where reload ends up doing a DFmode load of a constant from
9607 mem using two gprs. Unfortunately, at this point reload
9608 hasn't yet selected regs so poking around in reload data
9609 won't help and even if we could figure out the regs reliably,
9610 we'd still want to allow this transformation when the mem is
9611 naturally aligned. Since we say the address is good here, we
9612 can't disable offsets from LO_SUMs in mem_operand_gpr.
9613 FIXME: Allow offset from lo_sum for other modes too, when
9614 mem is sufficiently aligned.
9616 Also disallow this if the type can go in VMX/Altivec registers, since
9617 those registers do not have d-form (reg+offset) address modes. */
9618 && !reg_addr[mode].scalar_in_vmx_p
9619 && mode != TFmode
9620 && mode != TDmode
9621 && mode != IFmode
9622 && mode != KFmode
9623 && (mode != TImode || !TARGET_VSX)
9624 && mode != PTImode
9625 && (mode != DImode || TARGET_POWERPC64)
9626 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9627 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9629 #if TARGET_MACHO
9630 if (flag_pic)
9632 rtx offset = machopic_gen_offset (x);
9633 x = gen_rtx_LO_SUM (GET_MODE (x),
9634 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9635 gen_rtx_HIGH (Pmode, offset)), offset);
9637 else
9638 #endif
9639 x = gen_rtx_LO_SUM (GET_MODE (x),
9640 gen_rtx_HIGH (Pmode, x), x);
9642 if (TARGET_DEBUG_ADDR)
9644 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9645 debug_rtx (x);
9647 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9648 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9649 opnum, (enum reload_type) type);
9650 *win = 1;
9651 return x;
9654 /* Reload an offset address wrapped by an AND that represents the
9655 masking of the lower bits. Strip the outer AND and let reload
9656 convert the offset address into an indirect address. For VSX,
9657 force reload to create the address with an AND in a separate
9658 register, because we can't guarantee an altivec register will
9659 be used. */
9660 if (VECTOR_MEM_ALTIVEC_P (mode)
9661 && GET_CODE (x) == AND
9662 && GET_CODE (XEXP (x, 0)) == PLUS
9663 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9664 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9665 && GET_CODE (XEXP (x, 1)) == CONST_INT
9666 && INTVAL (XEXP (x, 1)) == -16)
9668 x = XEXP (x, 0);
9669 *win = 1;
9670 return x;
9673 if (TARGET_TOC
9674 && reg_offset_p
9675 && !quad_offset_p
9676 && GET_CODE (x) == SYMBOL_REF
9677 && use_toc_relative_ref (x, mode))
9679 x = create_TOC_reference (x, NULL_RTX);
9680 if (TARGET_CMODEL != CMODEL_SMALL)
9682 if (TARGET_DEBUG_ADDR)
9684 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9685 debug_rtx (x);
9687 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9688 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9689 opnum, (enum reload_type) type);
9691 *win = 1;
9692 return x;
9694 *win = 0;
9695 return x;
9698 /* Debug version of rs6000_legitimize_reload_address. */
9699 static rtx
9700 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9701 int opnum, int type,
9702 int ind_levels, int *win)
9704 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9705 ind_levels, win);
9706 fprintf (stderr,
9707 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9708 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9709 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9710 debug_rtx (x);
9712 if (x == ret)
9713 fprintf (stderr, "Same address returned\n");
9714 else if (!ret)
9715 fprintf (stderr, "NULL returned\n");
9716 else
9718 fprintf (stderr, "New address:\n");
9719 debug_rtx (ret);
9722 return ret;
9725 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9726 that is a valid memory address for an instruction.
9727 The MODE argument is the machine mode for the MEM expression
9728 that wants to use this address.
9730 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9731 refers to a constant pool entry of an address (or the sum of it
9732 plus a constant), a short (16-bit signed) constant plus a register,
9733 the sum of two registers, or a register indirect, possibly with an
9734 auto-increment. For DFmode, DDmode and DImode with a constant plus
9735 register, we must ensure that both words are addressable, or on
9736 PowerPC64 that the offset is word aligned.
9738 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9739 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9740 because adjacent memory cells are accessed by adding word-sized offsets
9741 during assembly output. */
9742 static bool
9743 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9745 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9746 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9748 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9749 if (VECTOR_MEM_ALTIVEC_P (mode)
9750 && GET_CODE (x) == AND
9751 && GET_CODE (XEXP (x, 1)) == CONST_INT
9752 && INTVAL (XEXP (x, 1)) == -16)
9753 x = XEXP (x, 0);
9755 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9756 return 0;
9757 if (legitimate_indirect_address_p (x, reg_ok_strict))
9758 return 1;
9759 if (TARGET_UPDATE
9760 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9761 && mode_supports_pre_incdec_p (mode)
9762 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9763 return 1;
9764 /* Handle restricted vector d-form offsets in ISA 3.0. */
9765 if (quad_offset_p)
9767 if (quad_address_p (x, mode, reg_ok_strict))
9768 return 1;
9770 else if (virtual_stack_registers_memory_p (x))
9771 return 1;
9773 else if (reg_offset_p)
9775 if (legitimate_small_data_p (mode, x))
9776 return 1;
9777 if (legitimate_constant_pool_address_p (x, mode,
9778 reg_ok_strict || lra_in_progress))
9779 return 1;
9780 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9781 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9782 return 1;
9785 /* For TImode, if we have TImode in VSX registers, only allow register
9786 indirect addresses. This will allow the values to go in either GPRs
9787 or VSX registers without reloading. The vector types would tend to
9788 go into VSX registers, so we allow REG+REG, while TImode seems
9789 somewhat split, in that some uses are GPR based, and some VSX based. */
9790 /* FIXME: We could loosen this by changing the following to
9791 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9792 but currently we cannot allow REG+REG addressing for TImode. See
9793 PR72827 for complete details on how this ends up hoodwinking DSE. */
9794 if (mode == TImode && TARGET_VSX)
9795 return 0;
9796 /* If not REG_OK_STRICT (before reload), allow any stack offset. */
9797 if (! reg_ok_strict
9798 && reg_offset_p
9799 && GET_CODE (x) == PLUS
9800 && GET_CODE (XEXP (x, 0)) == REG
9801 && (XEXP (x, 0) == virtual_stack_vars_rtx
9802 || XEXP (x, 0) == arg_pointer_rtx)
9803 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9804 return 1;
9805 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9806 return 1;
9807 if (!FLOAT128_2REG_P (mode)
9808 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9809 || TARGET_POWERPC64
9810 || (mode != DFmode && mode != DDmode))
9811 && (TARGET_POWERPC64 || mode != DImode)
9812 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9813 && mode != PTImode
9814 && !avoiding_indexed_address_p (mode)
9815 && legitimate_indexed_address_p (x, reg_ok_strict))
9816 return 1;
9817 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9818 && mode_supports_pre_modify_p (mode)
9819 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9820 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9821 reg_ok_strict, false)
9822 || (!avoiding_indexed_address_p (mode)
9823 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9824 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9825 return 1;
9826 if (reg_offset_p && !quad_offset_p
9827 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9828 return 1;
9829 return 0;
9832 /* Debug version of rs6000_legitimate_address_p. */
9833 static bool
9834 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9835 bool reg_ok_strict)
9837 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9838 fprintf (stderr,
9839 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9840 "strict = %d, reload = %s, code = %s\n",
9841 ret ? "true" : "false",
9842 GET_MODE_NAME (mode),
9843 reg_ok_strict,
9844 (reload_completed ? "after" : "before"),
9845 GET_RTX_NAME (GET_CODE (x)));
9846 debug_rtx (x);
9848 return ret;
9851 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9853 static bool
9854 rs6000_mode_dependent_address_p (const_rtx addr,
9855 addr_space_t as ATTRIBUTE_UNUSED)
9857 return rs6000_mode_dependent_address_ptr (addr);
9860 /* Go to LABEL if ADDR (a legitimate address expression)
9861 has an effect that depends on the machine mode it is used for.
9863 On the RS/6000 this is true of all integral offsets (since AltiVec
9864 and VSX modes don't allow them) and of pre-increment or decrement addresses.
9866 ??? Except that due to conceptual problems in offsettable_address_p
9867 we can't really report the problems of integral offsets. So leave
9868 this assuming that the adjustable offset must be valid for the
9869 sub-words of a TFmode operand, which is what we had before. */
9871 static bool
9872 rs6000_mode_dependent_address (const_rtx addr)
9874 switch (GET_CODE (addr))
9876 case PLUS:
9877 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9878 is considered a legitimate address before reload, so there
9879 are no offset restrictions in that case. Note that this
9880 condition is safe in strict mode because any address involving
9881 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9882 been rejected as illegitimate. */
9883 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9884 && XEXP (addr, 0) != arg_pointer_rtx
9885 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9887 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9888 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
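/* Viewing val as signed, the unsigned comparison above is equivalent to
   val < -0x8000 || val > 0x7fff - (TARGET_POWERPC64 ? 8 : 12): some
   word of a worst-case 16-byte access would overflow the 16-bit signed
   displacement field.  */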
9890 break;
9892 case LO_SUM:
9893 /* Anything in the constant pool is sufficiently aligned that
9894 all bytes have the same high part address. */
9895 return !legitimate_constant_pool_address_p (addr, QImode, false);
9897 /* Auto-increment cases are now treated generically in recog.c. */
9898 case PRE_MODIFY:
9899 return TARGET_UPDATE;
9901 /* AND is only allowed in Altivec loads. */
9902 case AND:
9903 return true;
9905 default:
9906 break;
9909 return false;
9912 /* Debug version of rs6000_mode_dependent_address. */
9913 static bool
9914 rs6000_debug_mode_dependent_address (const_rtx addr)
9916 bool ret = rs6000_mode_dependent_address (addr);
9918 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9919 ret ? "true" : "false");
9920 debug_rtx (addr);
9922 return ret;
9925 /* Implement FIND_BASE_TERM. */
9928 rs6000_find_base_term (rtx op)
9930 rtx base;
9932 base = op;
9933 if (GET_CODE (base) == CONST)
9934 base = XEXP (base, 0);
9935 if (GET_CODE (base) == PLUS)
9936 base = XEXP (base, 0);
9937 if (GET_CODE (base) == UNSPEC)
9938 switch (XINT (base, 1))
9940 case UNSPEC_TOCREL:
9941 case UNSPEC_MACHOPIC_OFFSET:
9942 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9943 for aliasing purposes. */
9944 return XVECEXP (base, 0, 0);
9947 return op;
9950 /* More elaborate version of recog's offsettable_memref_p predicate
9951 that works around the ??? note of rs6000_mode_dependent_address.
9952 In particular it accepts
9954 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9956 in 32-bit mode, which the recog predicate rejects. */
9958 static bool
9959 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9961 bool worst_case;
9963 if (!MEM_P (op))
9964 return false;
9966 /* First mimic offsettable_memref_p. */
9967 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9968 return true;
9970 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9971 the latter predicate knows nothing about the mode of the memory
9972 reference and, therefore, assumes that it is the largest supported
9973 mode (TFmode). As a consequence, legitimate offsettable memory
9974 references are rejected. rs6000_legitimate_offset_address_p contains
9975 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9976 at least with a little bit of help here given that we know the
9977 actual registers used. */
9978 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9979 || GET_MODE_SIZE (reg_mode) == 4);
9980 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9981 strict, worst_case);
9984 /* Determine the reassociation width to be used in reassociate_bb.
9985 This takes into account how many parallel operations we
9986 can actually do of a given type, and also the latency.
9988 int add/sub 6/cycle
9989 mul 2/cycle
9990 vect add/sub/mul 2/cycle
9991 fp add/sub/mul 2/cycle
9992 dfp 1/cycle
9995 static int
9996 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9997 machine_mode mode)
9999 switch (rs6000_tune)
10001 case PROCESSOR_POWER8:
10002 case PROCESSOR_POWER9:
10003 if (DECIMAL_FLOAT_MODE_P (mode))
10004 return 1;
10005 if (VECTOR_MODE_P (mode))
10006 return 4;
10007 if (INTEGRAL_MODE_P (mode))
10008 return 1;
10009 if (FLOAT_MODE_P (mode))
10010 return 4;
10011 break;
10012 default:
10013 break;
10015 return 1;
10018 /* Change register usage conditional on target flags. */
10019 static void
10020 rs6000_conditional_register_usage (void)
10022 int i;
10024 if (TARGET_DEBUG_TARGET)
10025 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10027 /* Set MQ register fixed (already call_used) so that it will not be
10028 allocated. */
10029 fixed_regs[64] = 1;
10031 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10032 if (TARGET_64BIT)
10033 fixed_regs[13] = call_used_regs[13]
10034 = call_really_used_regs[13] = 1;
10036 /* Conditionally disable FPRs. */
10037 if (TARGET_SOFT_FLOAT)
10038 for (i = 32; i < 64; i++)
10039 fixed_regs[i] = call_used_regs[i]
10040 = call_really_used_regs[i] = 1;
10042 /* The TOC register is not killed across calls in a way that is
10043 visible to the compiler. */
10044 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10045 call_really_used_regs[2] = 0;
10047 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10048 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10050 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10051 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10052 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10053 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10055 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10056 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10057 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10058 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10060 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10061 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10062 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10064 if (!TARGET_ALTIVEC && !TARGET_VSX)
10066 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10067 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10068 call_really_used_regs[VRSAVE_REGNO] = 1;
10071 if (TARGET_ALTIVEC || TARGET_VSX)
10072 global_regs[VSCR_REGNO] = 1;
10074 if (TARGET_ALTIVEC_ABI)
10076 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10077 call_used_regs[i] = call_really_used_regs[i] = 1;
10079 /* AIX reserves VR20:31 in non-extended ABI mode. */
10080 if (TARGET_XCOFF)
10081 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10082 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10087 /* Output insns to set DEST equal to the constant SOURCE as a series of
10088 lis, ori and shift instructions and return TRUE. */
10090 bool
10091 rs6000_emit_set_const (rtx dest, rtx source)
10093 machine_mode mode = GET_MODE (dest);
10094 rtx temp, set;
10095 rtx_insn *insn;
10096 HOST_WIDE_INT c;
10098 gcc_checking_assert (CONST_INT_P (source));
10099 c = INTVAL (source);
10100 switch (mode)
10102 case E_QImode:
10103 case E_HImode:
10104 emit_insn (gen_rtx_SET (dest, source));
10105 return true;
10107 case E_SImode:
10108 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10110 emit_insn (gen_rtx_SET (copy_rtx (temp),
10111 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10112 emit_insn (gen_rtx_SET (dest,
10113 gen_rtx_IOR (SImode, copy_rtx (temp),
10114 GEN_INT (c & 0xffff))));
10115 break;
10117 case E_DImode:
10118 if (!TARGET_POWERPC64)
10120 rtx hi, lo;
10122 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10123 DImode);
10124 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10125 DImode);
10126 emit_move_insn (hi, GEN_INT (c >> 32));
10127 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10128 emit_move_insn (lo, GEN_INT (c));
10130 else
10131 rs6000_emit_set_long_const (dest, c);
10132 break;
10134 default:
10135 gcc_unreachable ();
10138 insn = get_last_insn ();
10139 set = single_set (insn);
10140 if (! CONSTANT_P (SET_SRC (set)))
10141 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
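/* The REG_EQUAL note records that the destination now holds exactly C
   even though it was materialized piecewise, so later passes can treat
   the last insn as a plain constant load.  */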
10143 return true;
10146 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10147 Output insns to set DEST equal to the constant C as a series of
10148 lis, ori and shift instructions. */
10150 static void
10151 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10153 rtx temp;
10154 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10156 ud1 = c & 0xffff;
10157 c = c >> 16;
10158 ud2 = c & 0xffff;
10159 c = c >> 16;
10160 ud3 = c & 0xffff;
10161 c = c >> 16;
10162 ud4 = c & 0xffff;
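/* E.g. c = 0x123456789abcdef0 splits into ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc and ud1 = 0xdef0.  The general case below materializes
   this as roughly
     lis rT,0x1234; ori rT,rT,0x5678; sldi rT,rT,32;
     oris rT,rT,0x9abc; ori rD,rT,0xdef0
   while the earlier cases peel off constants needing fewer insns.  */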
10164 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10165 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10166 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10168 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10169 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10171 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10173 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10174 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10175 if (ud1 != 0)
10176 emit_move_insn (dest,
10177 gen_rtx_IOR (DImode, copy_rtx (temp),
10178 GEN_INT (ud1)));
10180 else if (ud3 == 0 && ud4 == 0)
10182 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10184 gcc_assert (ud2 & 0x8000);
10185 emit_move_insn (copy_rtx (temp),
10186 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10187 if (ud1 != 0)
10188 emit_move_insn (copy_rtx (temp),
10189 gen_rtx_IOR (DImode, copy_rtx (temp),
10190 GEN_INT (ud1)));
10191 emit_move_insn (dest,
10192 gen_rtx_ZERO_EXTEND (DImode,
10193 gen_lowpart (SImode,
10194 copy_rtx (temp))));
10196 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10197 || (ud4 == 0 && ! (ud3 & 0x8000)))
10199 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10201 emit_move_insn (copy_rtx (temp),
10202 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10203 if (ud2 != 0)
10204 emit_move_insn (copy_rtx (temp),
10205 gen_rtx_IOR (DImode, copy_rtx (temp),
10206 GEN_INT (ud2)));
10207 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10208 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10209 GEN_INT (16)));
10210 if (ud1 != 0)
10211 emit_move_insn (dest,
10212 gen_rtx_IOR (DImode, copy_rtx (temp),
10213 GEN_INT (ud1)));
10215 else
10217 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10219 emit_move_insn (copy_rtx (temp),
10220 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10221 if (ud3 != 0)
10222 emit_move_insn (copy_rtx (temp),
10223 gen_rtx_IOR (DImode, copy_rtx (temp),
10224 GEN_INT (ud3)));
10226 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10227 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10228 GEN_INT (32)));
10229 if (ud2 != 0)
10230 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10231 gen_rtx_IOR (DImode, copy_rtx (temp),
10232 GEN_INT (ud2 << 16)));
10233 if (ud1 != 0)
10234 emit_move_insn (dest,
10235 gen_rtx_IOR (DImode, copy_rtx (temp),
10236 GEN_INT (ud1)));
10240 /* Helper for the following. Get rid of [r+r] memory refs
10241 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10243 static void
10244 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10246 if (GET_CODE (operands[0]) == MEM
10247 && GET_CODE (XEXP (operands[0], 0)) != REG
10248 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10249 GET_MODE (operands[0]), false))
10250 operands[0]
10251 = replace_equiv_address (operands[0],
10252 copy_addr_to_reg (XEXP (operands[0], 0)));
10254 if (GET_CODE (operands[1]) == MEM
10255 && GET_CODE (XEXP (operands[1], 0)) != REG
10256 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10257 GET_MODE (operands[1]), false))
10258 operands[1]
10259 = replace_equiv_address (operands[1],
10260 copy_addr_to_reg (XEXP (operands[1], 0)));
10263 /* Generate a vector of constants to permute MODE for a little-endian
10264 storage operation by swapping the two halves of a vector. */
10265 static rtvec
10266 rs6000_const_vec (machine_mode mode)
10268 int i, subparts;
10269 rtvec v;
10271 switch (mode)
10273 case E_V1TImode:
10274 subparts = 1;
10275 break;
10276 case E_V2DFmode:
10277 case E_V2DImode:
10278 subparts = 2;
10279 break;
10280 case E_V4SFmode:
10281 case E_V4SImode:
10282 subparts = 4;
10283 break;
10284 case E_V8HImode:
10285 subparts = 8;
10286 break;
10287 case E_V16QImode:
10288 subparts = 16;
10289 break;
10290 default:
10291 gcc_unreachable ();
10294 v = rtvec_alloc (subparts);
10296 for (i = 0; i < subparts / 2; ++i)
10297 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10298 for (i = subparts / 2; i < subparts; ++i)
10299 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10301 return v;
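/* E.g. for V4SImode this returns the selector {2, 3, 0, 1}: the two
   64-bit halves of the vector are swapped while the element order
   within each half is preserved.  */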
10304 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10305 store operation. */
10306 void
10307 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10309 /* Scalar permutations are easier to express in integer modes rather than
10310 floating-point modes, so cast them here. We use V1TImode instead
10311 of TImode to ensure that the values don't go through GPRs. */
10312 if (FLOAT128_VECTOR_P (mode))
10314 dest = gen_lowpart (V1TImode, dest);
10315 source = gen_lowpart (V1TImode, source);
10316 mode = V1TImode;
10319 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10320 scalar. */
10321 if (mode == TImode || mode == V1TImode)
10322 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10323 GEN_INT (64))));
10324 else
10326 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10327 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10331 /* Emit a little-endian load from vector memory location SOURCE to VSX
10332 register DEST in mode MODE. The load is done with two permuting
10333 insns that represent an lxvd2x and an xxpermdi. */
10334 void
10335 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10337 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10338 V1TImode). */
10339 if (mode == TImode || mode == V1TImode)
10341 mode = V2DImode;
10342 dest = gen_lowpart (V2DImode, dest);
10343 source = adjust_address (source, V2DImode, 0);
10346 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10347 rs6000_emit_le_vsx_permute (tmp, source, mode);
10348 rs6000_emit_le_vsx_permute (dest, tmp, mode);
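/* The two permutes compose to the identity on the register contents:
   on little-endian, lxvd2x loads the doublewords swapped and the
   following xxpermdi swaps them back, leaving DEST in the element
   order the rest of the compiler expects.  */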
10351 /* Emit a little-endian store to vector memory location DEST from VSX
10352 register SOURCE in mode MODE. The store is done with two permuting
10353 insns that represent an xxpermdi and an stxvd2x. */
10354 void
10355 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10357 /* This should never be called during or after LRA, because it does
10358 not re-permute the source register. It is intended only for use
10359 during expand. */
10360 gcc_assert (!lra_in_progress && !reload_completed);
10362 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10363 V1TImode). */
10364 if (mode == TImode || mode == V1TImode)
10366 mode = V2DImode;
10367 dest = adjust_address (dest, V2DImode, 0);
10368 source = gen_lowpart (V2DImode, source);
10371 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10372 rs6000_emit_le_vsx_permute (tmp, source, mode);
10373 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10376 /* Emit a sequence representing a little-endian VSX load or store,
10377 moving data from SOURCE to DEST in mode MODE. This is done
10378 separately from rs6000_emit_move to ensure it is called only
10379 during expand. LE VSX loads and stores introduced later are
10380 handled with a split. The expand-time RTL generation allows
10381 us to optimize away redundant pairs of register-permutes. */
10382 void
10383 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10385 gcc_assert (!BYTES_BIG_ENDIAN
10386 && VECTOR_MEM_VSX_P (mode)
10387 && !TARGET_P9_VECTOR
10388 && !gpr_or_gpr_p (dest, source)
10389 && (MEM_P (source) ^ MEM_P (dest)));
10391 if (MEM_P (source))
10393 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10394 rs6000_emit_le_vsx_load (dest, source, mode);
10396 else
10398 if (!REG_P (source))
10399 source = force_reg (mode, source);
10400 rs6000_emit_le_vsx_store (dest, source, mode);
10404 /* Return whether a SFmode or SImode move can be done without converting one
10405 mode to another. This arises when we have:
10407 (SUBREG:SF (REG:SI ...))
10408 (SUBREG:SI (REG:SF ...))
10410 and one of the values is in a floating point/vector register, where SFmode
10411 scalars are stored in DFmode format. */
10413 bool
10414 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10416 if (TARGET_ALLOW_SF_SUBREG)
10417 return true;
10419 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10420 return true;
10422 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10423 return true;
10425 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10426 if (SUBREG_P (dest))
10428 rtx dest_subreg = SUBREG_REG (dest);
10429 rtx src_subreg = SUBREG_REG (src);
10430 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10433 return false;
10437 /* Helper function to change moves with:
10439 (SUBREG:SF (REG:SI)) and
10440 (SUBREG:SI (REG:SF))
10442 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10443 values are stored as DFmode values in the VSX registers. We need to convert
10444 the bits before we can use a direct move or operate on the bits in the
10445 vector register as an integer type.
10447 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10449 static bool
10450 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10452 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10453 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10454 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10456 rtx inner_source = SUBREG_REG (source);
10457 machine_mode inner_mode = GET_MODE (inner_source);
10459 if (mode == SImode && inner_mode == SFmode)
10461 emit_insn (gen_movsi_from_sf (dest, inner_source));
10462 return true;
10465 if (mode == SFmode && inner_mode == SImode)
10467 emit_insn (gen_movsf_from_si (dest, inner_source));
10468 return true;
10472 return false;
10475 /* Emit a move from SOURCE to DEST in mode MODE. */
10476 void
10477 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10479 rtx operands[2];
10480 operands[0] = dest;
10481 operands[1] = source;
10483 if (TARGET_DEBUG_ADDR)
10485 fprintf (stderr,
10486 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10487 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10488 GET_MODE_NAME (mode),
10489 lra_in_progress,
10490 reload_completed,
10491 can_create_pseudo_p ());
10492 debug_rtx (dest);
10493 fprintf (stderr, "source:\n");
10494 debug_rtx (source);
10497 /* Sanity check. CONST_WIDE_INT should only appear for modes wider than a HOST_WIDE_INT. */
10498 if (CONST_WIDE_INT_P (operands[1])
10499 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10501 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10502 gcc_unreachable ();
10505 #ifdef HAVE_AS_GNU_ATTRIBUTE
10506 /* If we use a long double type, set the flags in .gnu_attribute that say
10507 what the long double type is. This is to allow the linker's warning
10508 message for the wrong long double to be useful, even if the function does
10509 not do a call (for example, doing a 128-bit add on power9 if the long
10510 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
10511 are used when they aren't the default long double type. */
10512 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
10514 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
10515 rs6000_passes_float = rs6000_passes_long_double = true;
10517 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
10518 rs6000_passes_float = rs6000_passes_long_double = true;
10520 #endif
10522 /* See if we need to special case SImode/SFmode SUBREG moves. */
10523 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10524 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10525 return;
10527 /* Check if GCC is setting up a block move that will end up using FP
10528 registers as temporaries. We must make sure this is acceptable. */
10529 if (GET_CODE (operands[0]) == MEM
10530 && GET_CODE (operands[1]) == MEM
10531 && mode == DImode
10532 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10533 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10534 && ! (rs6000_slow_unaligned_access (SImode,
10535 (MEM_ALIGN (operands[0]) > 32
10536 ? 32 : MEM_ALIGN (operands[0])))
10537 || rs6000_slow_unaligned_access (SImode,
10538 (MEM_ALIGN (operands[1]) > 32
10539 ? 32 : MEM_ALIGN (operands[1]))))
10540 && ! MEM_VOLATILE_P (operands [0])
10541 && ! MEM_VOLATILE_P (operands [1]))
10543 emit_move_insn (adjust_address (operands[0], SImode, 0),
10544 adjust_address (operands[1], SImode, 0));
10545 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10546 adjust_address (copy_rtx (operands[1]), SImode, 4));
10547 return;
10550 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10551 && !gpc_reg_operand (operands[1], mode))
10552 operands[1] = force_reg (mode, operands[1]);
10554 /* Recognize the case where operand[1] is a reference to thread-local
10555 data and load its address to a register. */
10556 if (tls_referenced_p (operands[1]))
10558 enum tls_model model;
10559 rtx tmp = operands[1];
10560 rtx addend = NULL;
10562 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10564 addend = XEXP (XEXP (tmp, 0), 1);
10565 tmp = XEXP (XEXP (tmp, 0), 0);
10568 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10569 model = SYMBOL_REF_TLS_MODEL (tmp);
10570 gcc_assert (model != 0);
10572 tmp = rs6000_legitimize_tls_address (tmp, model);
10573 if (addend)
10575 tmp = gen_rtx_PLUS (mode, tmp, addend);
10576 tmp = force_operand (tmp, operands[0]);
10578 operands[1] = tmp;
10581 /* 128-bit constant floating-point values on Darwin should really be loaded
10582 as two parts. However, this premature splitting is a problem when DFmode
10583 values can go into Altivec registers. */
10584 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10585 && GET_CODE (operands[1]) == CONST_DOUBLE)
10587 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10588 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10589 DFmode);
10590 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10591 GET_MODE_SIZE (DFmode)),
10592 simplify_gen_subreg (DFmode, operands[1], mode,
10593 GET_MODE_SIZE (DFmode)),
10594 DFmode);
10595 return;
10598 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10599 p1:SD) if p1 is not of floating point class and p0 is spilled,
10600 as we have no analogous movsd_store for this. */
10601 if (lra_in_progress && mode == DDmode
10602 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10603 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10604 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10605 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10607 enum reg_class cl;
10608 int regno = REGNO (SUBREG_REG (operands[1]));
10610 if (regno >= FIRST_PSEUDO_REGISTER)
10612 cl = reg_preferred_class (regno);
10613 regno = reg_renumber[regno];
10614 if (regno < 0)
10615 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10617 if (regno >= 0 && ! FP_REGNO_P (regno))
10619 mode = SDmode;
10620 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10621 operands[1] = SUBREG_REG (operands[1]);
10624 if (lra_in_progress
10625 && mode == SDmode
10626 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10627 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10628 && (REG_P (operands[1])
10629 || (GET_CODE (operands[1]) == SUBREG
10630 && REG_P (SUBREG_REG (operands[1])))))
10632 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10633 ? SUBREG_REG (operands[1]) : operands[1]);
10634 enum reg_class cl;
10636 if (regno >= FIRST_PSEUDO_REGISTER)
10638 cl = reg_preferred_class (regno);
10639 gcc_assert (cl != NO_REGS);
10640 regno = reg_renumber[regno];
10641 if (regno < 0)
10642 regno = ira_class_hard_regs[cl][0];
10644 if (FP_REGNO_P (regno))
10646 if (GET_MODE (operands[0]) != DDmode)
10647 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10648 emit_insn (gen_movsd_store (operands[0], operands[1]));
10650 else if (INT_REGNO_P (regno))
10651 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10652 else
10653 gcc_unreachable ();
10654 return;
10656 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10657 p1:DD)) if p0 is not of floating point class and p1 is spilled,
10658 as we have no analogous movsd_load for this. */
10659 if (lra_in_progress && mode == DDmode
10660 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10661 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10662 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10663 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10665 enum reg_class cl;
10666 int regno = REGNO (SUBREG_REG (operands[0]));
10668 if (regno >= FIRST_PSEUDO_REGISTER)
10670 cl = reg_preferred_class (regno);
10671 regno = reg_renumber[regno];
10672 if (regno < 0)
10673 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10675 if (regno >= 0 && ! FP_REGNO_P (regno))
10677 mode = SDmode;
10678 operands[0] = SUBREG_REG (operands[0]);
10679 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10682 if (lra_in_progress
10683 && mode == SDmode
10684 && (REG_P (operands[0])
10685 || (GET_CODE (operands[0]) == SUBREG
10686 && REG_P (SUBREG_REG (operands[0]))))
10687 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10688 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10690 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10691 ? SUBREG_REG (operands[0]) : operands[0]);
10692 enum reg_class cl;
10694 if (regno >= FIRST_PSEUDO_REGISTER)
10696 cl = reg_preferred_class (regno);
10697 gcc_assert (cl != NO_REGS);
10698 regno = reg_renumber[regno];
10699 if (regno < 0)
10700 regno = ira_class_hard_regs[cl][0];
10702 if (FP_REGNO_P (regno))
10704 if (GET_MODE (operands[1]) != DDmode)
10705 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10706 emit_insn (gen_movsd_load (operands[0], operands[1]));
10708 else if (INT_REGNO_P (regno))
10709 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10710 else
10711 gcc_unreachable ();
10712 return;
10715 /* FIXME: In the long term, this switch statement should go away
10716 and be replaced by a sequence of tests based on things like
10717 mode == Pmode. */
10718 switch (mode)
10720 case E_HImode:
10721 case E_QImode:
10722 if (CONSTANT_P (operands[1])
10723 && GET_CODE (operands[1]) != CONST_INT)
10724 operands[1] = force_const_mem (mode, operands[1]);
10725 break;
10727 case E_TFmode:
10728 case E_TDmode:
10729 case E_IFmode:
10730 case E_KFmode:
10731 if (FLOAT128_2REG_P (mode))
10732 rs6000_eliminate_indexed_memrefs (operands);
10733 /* fall through */
10735 case E_DFmode:
10736 case E_DDmode:
10737 case E_SFmode:
10738 case E_SDmode:
10739 if (CONSTANT_P (operands[1])
10740 && ! easy_fp_constant (operands[1], mode))
10741 operands[1] = force_const_mem (mode, operands[1]);
10742 break;
10744 case E_V16QImode:
10745 case E_V8HImode:
10746 case E_V4SFmode:
10747 case E_V4SImode:
10748 case E_V2SFmode:
10749 case E_V2SImode:
10750 case E_V2DFmode:
10751 case E_V2DImode:
10752 case E_V1TImode:
10753 if (CONSTANT_P (operands[1])
10754 && !easy_vector_constant (operands[1], mode))
10755 operands[1] = force_const_mem (mode, operands[1]);
10756 break;
10758 case E_SImode:
10759 case E_DImode:
10760 /* Use the default pattern for the address of ELF small data. */
10761 if (TARGET_ELF
10762 && mode == Pmode
10763 && DEFAULT_ABI == ABI_V4
10764 && (GET_CODE (operands[1]) == SYMBOL_REF
10765 || GET_CODE (operands[1]) == CONST)
10766 && small_data_operand (operands[1], mode))
10768 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10769 return;
10772 if (DEFAULT_ABI == ABI_V4
10773 && mode == Pmode && mode == SImode
10774 && flag_pic == 1 && got_operand (operands[1], mode))
10776 emit_insn (gen_movsi_got (operands[0], operands[1]));
10777 return;
10780 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10781 && TARGET_NO_TOC
10782 && ! flag_pic
10783 && mode == Pmode
10784 && CONSTANT_P (operands[1])
10785 && GET_CODE (operands[1]) != HIGH
10786 && GET_CODE (operands[1]) != CONST_INT)
10788 rtx target = (!can_create_pseudo_p ()
10789 ? operands[0]
10790 : gen_reg_rtx (mode));
10792 /* If this is a function address on -mcall-aixdesc,
10793 convert it to the address of the descriptor. */
10794 if (DEFAULT_ABI == ABI_AIX
10795 && GET_CODE (operands[1]) == SYMBOL_REF
10796 && XSTR (operands[1], 0)[0] == '.')
10798 const char *name = XSTR (operands[1], 0);
10799 rtx new_ref;
10800 while (*name == '.')
10801 name++;
10802 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10803 CONSTANT_POOL_ADDRESS_P (new_ref)
10804 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10805 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10806 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10807 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10808 operands[1] = new_ref;
10811 if (DEFAULT_ABI == ABI_DARWIN)
10813 #if TARGET_MACHO
10814 if (MACHO_DYNAMIC_NO_PIC_P)
10816 /* Take care of any required data indirection. */
10817 operands[1] = rs6000_machopic_legitimize_pic_address (
10818 operands[1], mode, operands[0]);
10819 if (operands[0] != operands[1])
10820 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10821 return;
10823 #endif
10824 emit_insn (gen_macho_high (target, operands[1]));
10825 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10826 return;
10829 emit_insn (gen_elf_high (target, operands[1]));
10830 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10831 return;
10834 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10835 and we have put it in the TOC, we just need to make a TOC-relative
10836 reference to it. */
10837 if (TARGET_TOC
10838 && GET_CODE (operands[1]) == SYMBOL_REF
10839 && use_toc_relative_ref (operands[1], mode))
10840 operands[1] = create_TOC_reference (operands[1], operands[0]);
10841 else if (mode == Pmode
10842 && CONSTANT_P (operands[1])
10843 && GET_CODE (operands[1]) != HIGH
10844 && ((GET_CODE (operands[1]) != CONST_INT
10845 && ! easy_fp_constant (operands[1], mode))
10846 || (GET_CODE (operands[1]) == CONST_INT
10847 && (num_insns_constant (operands[1], mode)
10848 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10849 || (GET_CODE (operands[0]) == REG
10850 && FP_REGNO_P (REGNO (operands[0]))))
10851 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10852 && (TARGET_CMODEL == CMODEL_SMALL
10853 || can_create_pseudo_p ()
10854 || (REG_P (operands[0])
10855 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10858 #if TARGET_MACHO
10859 /* Darwin uses a special PIC legitimizer. */
10860 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10862 operands[1] =
10863 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10864 operands[0]);
10865 if (operands[0] != operands[1])
10866 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10867 return;
10869 #endif
10871 /* If we are to limit the number of things we put in the TOC and
10872 this is a symbol plus a constant we can add in one insn,
10873 just put the symbol in the TOC and add the constant. */
10874 if (GET_CODE (operands[1]) == CONST
10875 && TARGET_NO_SUM_IN_TOC
10876 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10877 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10878 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10879 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10880 && ! side_effects_p (operands[0]))
10882 rtx sym =
10883 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10884 rtx other = XEXP (XEXP (operands[1], 0), 1);
10886 sym = force_reg (mode, sym);
10887 emit_insn (gen_add3_insn (operands[0], sym, other));
10888 return;
10891 operands[1] = force_const_mem (mode, operands[1]);
10893 if (TARGET_TOC
10894 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10895 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10897 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10898 operands[0]);
10899 operands[1] = gen_const_mem (mode, tocref);
10900 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10903 break;
10905 case E_TImode:
10906 if (!VECTOR_MEM_VSX_P (TImode))
10907 rs6000_eliminate_indexed_memrefs (operands);
10908 break;
10910 case E_PTImode:
10911 rs6000_eliminate_indexed_memrefs (operands);
10912 break;
10914 default:
10915 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10918 /* Above, we may have called force_const_mem which may have returned
10919 an invalid address. If we can, fix this up; otherwise, reload will
10920 have to deal with it. */
10921 if (GET_CODE (operands[1]) == MEM)
10922 operands[1] = validize_mem (operands[1]);
10924 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10927 /* Nonzero if we can use a floating-point register to pass this arg. */
10928 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10929 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10930 && (CUM)->fregno <= FP_ARG_MAX_REG \
10931 && TARGET_HARD_FLOAT)
10933 /* Nonzero if we can use an AltiVec register to pass this arg. */
10934 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10935 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10936 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10937 && TARGET_ALTIVEC_ABI \
10938 && (NAMED))
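/* For illustration, with a hypothetical argument list: a named DFmode
   argument satisfies USE_FP_FOR_ARG_P while cum->fregno is still within
   FP_ARG_MAX_REG, and a named V4SImode argument satisfies
   USE_ALTIVEC_FOR_ARG_P under the AltiVec ABI while cum->vregno is
   within ALTIVEC_ARG_MAX_REG.  An unnamed (variadic) vector argument
   never does, because of the (NAMED) test above.  */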
10940 /* Walk down the type tree of TYPE counting consecutive base elements.
10941 If *MODEP is VOIDmode, then set it to the first valid floating point
10942 or vector type. If a non-floating point or vector type is found, or
10943 if a floating point or vector type that doesn't match a non-VOIDmode
10944 *MODEP is found, then return -1, otherwise return the count in the
10945 sub-tree. */
10947 static int
10948 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10950 machine_mode mode;
10951 HOST_WIDE_INT size;
10953 switch (TREE_CODE (type))
10955 case REAL_TYPE:
10956 mode = TYPE_MODE (type);
10957 if (!SCALAR_FLOAT_MODE_P (mode))
10958 return -1;
10960 if (*modep == VOIDmode)
10961 *modep = mode;
10963 if (*modep == mode)
10964 return 1;
10966 break;
10968 case COMPLEX_TYPE:
10969 mode = TYPE_MODE (TREE_TYPE (type));
10970 if (!SCALAR_FLOAT_MODE_P (mode))
10971 return -1;
10973 if (*modep == VOIDmode)
10974 *modep = mode;
10976 if (*modep == mode)
10977 return 2;
10979 break;
10981 case VECTOR_TYPE:
10982 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10983 return -1;
10985 /* Use V4SImode as representative of all 128-bit vector types. */
10986 size = int_size_in_bytes (type);
10987 switch (size)
10989 case 16:
10990 mode = V4SImode;
10991 break;
10992 default:
10993 return -1;
10996 if (*modep == VOIDmode)
10997 *modep = mode;
10999 /* Vector modes are considered to be opaque: two vectors are
11000 equivalent for the purposes of being homogeneous aggregates
11001 if they are the same size. */
11002 if (*modep == mode)
11003 return 1;
11005 break;
11007 case ARRAY_TYPE:
11009 int count;
11010 tree index = TYPE_DOMAIN (type);
11012 /* Can't handle incomplete types or sizes that are not
11013 fixed. */
11014 if (!COMPLETE_TYPE_P (type)
11015 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11016 return -1;
11018 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11019 if (count == -1
11020 || !index
11021 || !TYPE_MAX_VALUE (index)
11022 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11023 || !TYPE_MIN_VALUE (index)
11024 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11025 || count < 0)
11026 return -1;
11028 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11029 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11031 /* There must be no padding. */
11032 if (wi::to_wide (TYPE_SIZE (type))
11033 != count * GET_MODE_BITSIZE (*modep))
11034 return -1;
11036 return count;
11039 case RECORD_TYPE:
11041 int count = 0;
11042 int sub_count;
11043 tree field;
11045 /* Can't handle incomplete types or sizes that are not
11046 fixed. */
11047 if (!COMPLETE_TYPE_P (type)
11048 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11049 return -1;
11051 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11053 if (TREE_CODE (field) != FIELD_DECL)
11054 continue;
11056 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11057 if (sub_count < 0)
11058 return -1;
11059 count += sub_count;
11062 /* There must be no padding. */
11063 if (wi::to_wide (TYPE_SIZE (type))
11064 != count * GET_MODE_BITSIZE (*modep))
11065 return -1;
11067 return count;
11070 case UNION_TYPE:
11071 case QUAL_UNION_TYPE:
11073 /* These aren't very interesting except in a degenerate case. */
11074 int count = 0;
11075 int sub_count;
11076 tree field;
11078 /* Can't handle incomplete types or sizes that are not
11079 fixed. */
11080 if (!COMPLETE_TYPE_P (type)
11081 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11082 return -1;
11084 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11086 if (TREE_CODE (field) != FIELD_DECL)
11087 continue;
11089 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11090 if (sub_count < 0)
11091 return -1;
11092 count = count > sub_count ? count : sub_count;
11095 /* There must be no padding. */
11096 if (wi::to_wide (TYPE_SIZE (type))
11097 != count * GET_MODE_BITSIZE (*modep))
11098 return -1;
11100 return count;
11103 default:
11104 break;
11107 return -1;
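/* Two hypothetical examples of the walk above:

       struct ok  { double a; double b[2]; };  -- yields 3, *MODEP = DFmode
       struct bad { double a; float b; };      -- yields -1 (mixed modes)

   For struct ok, the ARRAY_TYPE case contributes 2 and the RECORD_TYPE
   case sums the fields; the no-padding check confirms that 3 * 64 bits
   covers the whole 24-byte struct.  */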
11110 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11111 float or vector aggregate that shall be passed in FP/vector registers
11112 according to the ELFv2 ABI, return the homogeneous element mode in
11113 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11115 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11117 static bool
11118 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11119 machine_mode *elt_mode,
11120 int *n_elts)
11122 /* Note that we do not accept complex types at the top level as
11123 homogeneous aggregates; these types are handled via the
11124 targetm.calls.split_complex_arg mechanism. Complex types
11125 can be elements of homogeneous aggregates, however. */
11126 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
11127 && AGGREGATE_TYPE_P (type))
11129 machine_mode field_mode = VOIDmode;
11130 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11132 if (field_count > 0)
11134 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11135 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11137 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11138 up to AGGR_ARG_NUM_REG registers. */
11139 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11141 if (elt_mode)
11142 *elt_mode = field_mode;
11143 if (n_elts)
11144 *n_elts = field_count;
11145 return true;
11150 if (elt_mode)
11151 *elt_mode = mode;
11152 if (n_elts)
11153 *n_elts = 1;
11154 return false;
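/* A sketch of the effect, using hypothetical types: under ELFv2,
   struct { double x, y; } comes back with *ELT_MODE = DFmode and
   *N_ELTS = 2 and is passed in two FPRs, whereas a struct of nine
   doubles exceeds the AGGR_ARG_NUM_REG limit and falls back to the
   ordinary aggregate rules.  */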
11157 /* Return a nonzero value to say to return the function value in
11158 memory, just as large structures are always returned. TYPE will be
11159 the data type of the value, and FNTYPE will be the type of the
11160 function doing the returning, or @code{NULL} for libcalls.
11162 The AIX ABI for the RS/6000 specifies that all structures are
11163 returned in memory. The Darwin ABI does the same.
11165 For the Darwin 64 Bit ABI, a function result can be returned in
11166 registers or in memory, depending on the size of the return data
11167 type. If it is returned in registers, the value occupies the same
11168 registers as it would if it were the first and only function
11169 argument. Otherwise, the function places its result in memory at
11170 the location pointed to by GPR3.
11172 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11173 but a draft put them in memory, and GCC used to implement the draft
11174 instead of the final standard. Therefore, aix_struct_return
11175 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11176 compatibility can change DRAFT_V4_STRUCT_RET to override the
11177 default, and -m switches get the final word. See
11178 rs6000_option_override_internal for more details.
11180 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11181 long double support is enabled. These values are returned in memory.
11183 int_size_in_bytes returns -1 for variable size objects, which go in
11184 memory always. The cast to unsigned makes -1 > 8. */
11186 static bool
11187 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11189 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11190 if (TARGET_MACHO
11191 && rs6000_darwin64_abi
11192 && TREE_CODE (type) == RECORD_TYPE
11193 && int_size_in_bytes (type) > 0)
11195 CUMULATIVE_ARGS valcum;
11196 rtx valret;
11198 valcum.words = 0;
11199 valcum.fregno = FP_ARG_MIN_REG;
11200 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11201 /* Do a trial code generation as if this were going to be passed
11202 as an argument; if any part goes in memory, we return NULL. */
11203 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11204 if (valret)
11205 return false;
11206 /* Otherwise fall through to more conventional ABI rules. */
11209 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
11210 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11211 NULL, NULL))
11212 return false;
11214 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
11215 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11216 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11217 return false;
11219 if (AGGREGATE_TYPE_P (type)
11220 && (aix_struct_return
11221 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11222 return true;
11224 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11225 modes only exist for GCC vector types if -maltivec. */
11226 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11227 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11228 return false;
11230 /* Return synthetic vectors in memory. */
11231 if (TREE_CODE (type) == VECTOR_TYPE
11232 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11234 static bool warned_for_return_big_vectors = false;
11235 if (!warned_for_return_big_vectors)
11237 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11238 "non-standard ABI extension with no compatibility "
11239 "guarantee");
11240 warned_for_return_big_vectors = true;
11242 return true;
11245 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11246 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11247 return true;
11249 return false;
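/* Hypothetical cases for the rules above: under ELFv2,
   struct { double a, b; } is a homogeneous aggregate and
   struct { char c[16]; } fits in 16 bytes, so both are returned in
   registers; under AIX, aix_struct_return sends every aggregate to
   memory regardless of size.  */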
11252 /* Specify whether values returned in registers should be at the most
11253 significant end of a register. We want aggregates returned by
11254 value to match the way aggregates are passed to functions. */
11256 static bool
11257 rs6000_return_in_msb (const_tree valtype)
11259 return (DEFAULT_ABI == ABI_ELFv2
11260 && BYTES_BIG_ENDIAN
11261 && AGGREGATE_TYPE_P (valtype)
11262 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11263 == PAD_UPWARD));
11266 #ifdef HAVE_AS_GNU_ATTRIBUTE
11267 /* Return TRUE if a call to function FNDECL may be one that
11268 potentially affects the function calling ABI of the object file. */
11270 static bool
11271 call_ABI_of_interest (tree fndecl)
11273 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11275 struct cgraph_node *c_node;
11277 /* Libcalls are always interesting. */
11278 if (fndecl == NULL_TREE)
11279 return true;
11281 /* Any call to an external function is interesting. */
11282 if (DECL_EXTERNAL (fndecl))
11283 return true;
11285 /* Interesting functions that we are emitting in this object file. */
11286 c_node = cgraph_node::get (fndecl);
11287 c_node = c_node->ultimate_alias_target ();
11288 return !c_node->only_called_directly_p ();
11290 return false;
11292 #endif
11294 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11295 for a call to a function whose data type is FNTYPE.
11296 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11298 For incoming args we set the number of arguments in the prototype large
11299 so we never return a PARALLEL. */
11301 void
11302 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11303 rtx libname ATTRIBUTE_UNUSED, int incoming,
11304 int libcall, int n_named_args,
11305 tree fndecl ATTRIBUTE_UNUSED,
11306 machine_mode return_mode ATTRIBUTE_UNUSED)
11308 static CUMULATIVE_ARGS zero_cumulative;
11310 *cum = zero_cumulative;
11311 cum->words = 0;
11312 cum->fregno = FP_ARG_MIN_REG;
11313 cum->vregno = ALTIVEC_ARG_MIN_REG;
11314 cum->prototype = (fntype && prototype_p (fntype));
11315 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11316 ? CALL_LIBCALL : CALL_NORMAL);
11317 cum->sysv_gregno = GP_ARG_MIN_REG;
11318 cum->stdarg = stdarg_p (fntype);
11319 cum->libcall = libcall;
11321 cum->nargs_prototype = 0;
11322 if (incoming || cum->prototype)
11323 cum->nargs_prototype = n_named_args;
11325 /* Check for a longcall attribute. */
11326 if ((!fntype && rs6000_default_long_calls)
11327 || (fntype
11328 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11329 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11330 cum->call_cookie |= CALL_LONG;
11332 if (TARGET_DEBUG_ARG)
11334 fprintf (stderr, "\ninit_cumulative_args:");
11335 if (fntype)
11337 tree ret_type = TREE_TYPE (fntype);
11338 fprintf (stderr, " ret code = %s,",
11339 get_tree_code_name (TREE_CODE (ret_type)));
11342 if (cum->call_cookie & CALL_LONG)
11343 fprintf (stderr, " longcall,");
11345 fprintf (stderr, " proto = %d, nargs = %d\n",
11346 cum->prototype, cum->nargs_prototype);
11349 #ifdef HAVE_AS_GNU_ATTRIBUTE
11350 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11352 cum->escapes = call_ABI_of_interest (fndecl);
11353 if (cum->escapes)
11355 tree return_type;
11357 if (fntype)
11359 return_type = TREE_TYPE (fntype);
11360 return_mode = TYPE_MODE (return_type);
11362 else
11363 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11365 if (return_type != NULL)
11367 if (TREE_CODE (return_type) == RECORD_TYPE
11368 && TYPE_TRANSPARENT_AGGR (return_type))
11370 return_type = TREE_TYPE (first_field (return_type));
11371 return_mode = TYPE_MODE (return_type);
11373 if (AGGREGATE_TYPE_P (return_type)
11374 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11375 <= 8))
11376 rs6000_returns_struct = true;
11378 if (SCALAR_FLOAT_MODE_P (return_mode))
11380 rs6000_passes_float = true;
11381 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11382 && (FLOAT128_IBM_P (return_mode)
11383 || FLOAT128_IEEE_P (return_mode)
11384 || (return_type != NULL
11385 && (TYPE_MAIN_VARIANT (return_type)
11386 == long_double_type_node))))
11387 rs6000_passes_long_double = true;
11389 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11390 || PAIRED_VECTOR_MODE (return_mode))
11391 rs6000_passes_vector = true;
11394 #endif
11396 if (fntype
11397 && !TARGET_ALTIVEC
11398 && TARGET_ALTIVEC_ABI
11399 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11401 error ("cannot return value in vector register because"
11402 " altivec instructions are disabled, use %qs"
11403 " to enable them", "-maltivec");
11407 /* The mode the ABI uses for a word. This is not the same as word_mode
11408 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11410 static scalar_int_mode
11411 rs6000_abi_word_mode (void)
11413 return TARGET_32BIT ? SImode : DImode;
11416 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11417 static char *
11418 rs6000_offload_options (void)
11420 if (TARGET_64BIT)
11421 return xstrdup ("-foffload-abi=lp64");
11422 else
11423 return xstrdup ("-foffload-abi=ilp32");
11426 /* On rs6000, function arguments are promoted, as are function return
11427 values. */
11429 static machine_mode
11430 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11431 machine_mode mode,
11432 int *punsignedp ATTRIBUTE_UNUSED,
11433 const_tree, int)
11435 PROMOTE_MODE (mode, *punsignedp, type);
11437 return mode;
11440 /* Return true if TYPE must be passed on the stack and not in registers. */
11442 static bool
11443 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11445 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11446 return must_pass_in_stack_var_size (mode, type);
11447 else
11448 return must_pass_in_stack_var_size_or_pad (mode, type);
11451 static inline bool
11452 is_complex_IBM_long_double (machine_mode mode)
11454 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
11457 /* Whether ABI_V4 passes MODE args to a function in floating point
11458 registers. */
11460 static bool
11461 abi_v4_pass_in_fpr (machine_mode mode, bool named)
11463 if (!TARGET_HARD_FLOAT)
11464 return false;
11465 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11466 return true;
11467 if (TARGET_SINGLE_FLOAT && mode == SFmode && named)
11468 return true;
11469 /* ABI_V4 passes complex IBM long double in 8 gprs.
11470 Stupid, but we can't change the ABI now. */
11471 if (is_complex_IBM_long_double (mode))
11472 return false;
11473 if (FLOAT128_2REG_P (mode))
11474 return true;
11475 if (DECIMAL_FLOAT_MODE_P (mode))
11476 return true;
11477 return false;
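/* For example: a named DFmode argument goes in an FPR; an unnamed
   SFmode argument does not, since the single-float case requires
   NAMED; and complex IBM long double (ICmode) never does -- it takes
   8 GPRs instead, as noted above.  */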
11480 /* Implement TARGET_FUNCTION_ARG_PADDING.
11482 For the AIX ABI structs are always stored left shifted in their
11483 argument slot. */
11485 static pad_direction
11486 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11488 #ifndef AGGREGATE_PADDING_FIXED
11489 #define AGGREGATE_PADDING_FIXED 0
11490 #endif
11491 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11492 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11493 #endif
11495 if (!AGGREGATE_PADDING_FIXED)
11497 /* GCC used to pass structures of the same size as integer types as
11498 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11499 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11500 passed padded downward, except that -mstrict-align further
11501 muddied the water in that multi-component structures of 2 and 4
11502 bytes in size were passed padded upward.
11504 The following arranges for best compatibility with previous
11505 versions of gcc, but removes the -mstrict-align dependency. */
11506 if (BYTES_BIG_ENDIAN)
11508 HOST_WIDE_INT size = 0;
11510 if (mode == BLKmode)
11512 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11513 size = int_size_in_bytes (type);
11515 else
11516 size = GET_MODE_SIZE (mode);
11518 if (size == 1 || size == 2 || size == 4)
11519 return PAD_DOWNWARD;
11521 return PAD_UPWARD;
11524 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11526 if (type != 0 && AGGREGATE_TYPE_P (type))
11527 return PAD_UPWARD;
11530 /* Fall back to the default. */
11531 return default_function_arg_padding (mode, type);
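/* To illustrate the big-endian rule above with hypothetical sizes: a
   2-byte struct is padded downward, matching the old pass-as-integer
   behavior, while a 3-byte struct falls through to PAD_UPWARD.  */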
11534 /* If defined, a C expression that gives the alignment boundary, in bits,
11535 of an argument with the specified mode and type. If it is not defined,
11536 PARM_BOUNDARY is used for all arguments.
11538 V.4 wants long longs and doubles to be double word aligned. Just
11539 testing the mode size is a boneheaded way to do this as it means
11540 that other types such as complex int are also double word aligned.
11541 However, we're stuck with this because changing the ABI might break
11542 existing library interfaces.
11544 Quadword align Altivec/VSX vectors.
11545 Quadword align large synthetic vector types. */
11547 static unsigned int
11548 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11550 machine_mode elt_mode;
11551 int n_elts;
11553 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11555 if (DEFAULT_ABI == ABI_V4
11556 && (GET_MODE_SIZE (mode) == 8
11557 || (TARGET_HARD_FLOAT
11558 && !is_complex_IBM_long_double (mode)
11559 && FLOAT128_2REG_P (mode))))
11560 return 64;
11561 else if (FLOAT128_VECTOR_P (mode))
11562 return 128;
11563 else if (PAIRED_VECTOR_MODE (mode)
11564 || (type && TREE_CODE (type) == VECTOR_TYPE
11565 && int_size_in_bytes (type) >= 8
11566 && int_size_in_bytes (type) < 16))
11567 return 64;
11568 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11569 || (type && TREE_CODE (type) == VECTOR_TYPE
11570 && int_size_in_bytes (type) >= 16))
11571 return 128;
11573 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11574 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11575 -mcompat-align-parm is used. */
11576 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11577 || DEFAULT_ABI == ABI_ELFv2)
11578 && type && TYPE_ALIGN (type) > 64)
11580 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11581 or homogeneous float/vector aggregates here. We already handled
11582 vector aggregates above, but still need to check for float here. */
11583 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11584 && !SCALAR_FLOAT_MODE_P (elt_mode));
11586 /* We used to check for BLKmode instead of the above aggregate type
11587 check. Warn when this results in any difference to the ABI. */
11588 if (aggregate_p != (mode == BLKmode))
11590 static bool warned;
11591 if (!warned && warn_psabi)
11593 warned = true;
11594 inform (input_location,
11595 "the ABI of passing aggregates with %d-byte alignment"
11596 " has changed in GCC 5",
11597 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11601 if (aggregate_p)
11602 return 128;
11605 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11606 implement the "aggregate type" check as a BLKmode check here; this
11607 means certain aggregate types are in fact not aligned. */
11608 if (TARGET_MACHO && rs6000_darwin64_abi
11609 && mode == BLKmode
11610 && type && TYPE_ALIGN (type) > 64)
11611 return 128;
11613 return PARM_BOUNDARY;
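/* Sample boundaries from the rules above (hypothetical arguments):
   DImode under ABI_V4 -> 64; V4SImode -> 128; an ELFv2 struct whose
   TYPE_ALIGN is 128 -> 128, with the GCC 5 note emitted if the old
   BLKmode heuristic would have decided differently.  */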
11616 /* The offset in words to the start of the parameter save area. */
11618 static unsigned int
11619 rs6000_parm_offset (void)
11621 return (DEFAULT_ABI == ABI_V4 ? 2
11622 : DEFAULT_ABI == ABI_ELFv2 ? 4
11623 : 6);
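/* In bytes, this places the parameter save area at offset 8 for the
   32-bit V.4 ABI (back chain plus saved LR), 32 for ELFv2 (four
   doublewords of header), and 24 or 48 for 32- or 64-bit AIX (six
   words of header).  */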
11626 /* For a function parm of MODE and TYPE, return the starting word in
11627 the parameter area. NWORDS of the parameter area are already used. */
11629 static unsigned int
11630 rs6000_parm_start (machine_mode mode, const_tree type,
11631 unsigned int nwords)
11633 unsigned int align;
11635 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11636 return nwords + (-(rs6000_parm_offset () + nwords) & align);
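/* Worked example with assumed values: on 64-bit ELFv2 with NWORDS == 1
   and a 16-byte-aligned vector argument, ALIGN is 128 / 64 - 1 == 1,
   so the result is 1 + (-(4 + 1) & 1) == 2 and the argument starts at
   the second doubleword of the save area (i.e. in r5).  */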
11639 /* Compute the size (in words) of a function argument. */
11641 static unsigned long
11642 rs6000_arg_size (machine_mode mode, const_tree type)
11644 unsigned long size;
11646 if (mode != BLKmode)
11647 size = GET_MODE_SIZE (mode);
11648 else
11649 size = int_size_in_bytes (type);
11651 if (TARGET_32BIT)
11652 return (size + 3) >> 2;
11653 else
11654 return (size + 7) >> 3;
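/* For instance, a 20-byte BLKmode struct needs (20 + 3) >> 2 == 5
   words when TARGET_32BIT, and (20 + 7) >> 3 == 3 doublewords
   otherwise.  */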
11657 /* Use this to flush pending int fields. */
11659 static void
11660 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11661 HOST_WIDE_INT bitpos, int final)
11663 unsigned int startbit, endbit;
11664 int intregs, intoffset;
11666 /* Handle the situations where a float is taking up the first half
11667 of the GPR, and the other half is empty (typically due to
11668 alignment restrictions). We can detect this by an 8-byte-aligned
11669 int field, or by seeing that this is the final flush for this
11670 argument. Count the word and continue on. */
11671 if (cum->floats_in_gpr == 1
11672 && (cum->intoffset % 64 == 0
11673 || (cum->intoffset == -1 && final)))
11675 cum->words++;
11676 cum->floats_in_gpr = 0;
11679 if (cum->intoffset == -1)
11680 return;
11682 intoffset = cum->intoffset;
11683 cum->intoffset = -1;
11684 cum->floats_in_gpr = 0;
11686 if (intoffset % BITS_PER_WORD != 0)
11688 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11689 if (!int_mode_for_size (bits, 0).exists ())
11691 /* We couldn't find an appropriate mode, which happens,
11692 e.g., in packed structs when there are 3 bytes to load.
11693 Move intoffset back to the beginning of the word in this
11694 case. */
11695 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11699 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11700 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11701 intregs = (endbit - startbit) / BITS_PER_WORD;
11702 cum->words += intregs;
11703 /* words should be unsigned. */
11704 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11706 int pad = (endbit/BITS_PER_WORD) - cum->words;
11707 cum->words += pad;
11711 /* The darwin64 ABI calls for us to recurse down through structs,
11712 looking for elements passed in registers. Unfortunately, we have
11713 to track int register count here also because of misalignments
11714 in powerpc alignment mode. */
11716 static void
11717 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11718 const_tree type,
11719 HOST_WIDE_INT startbitpos)
11721 tree f;
11723 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11724 if (TREE_CODE (f) == FIELD_DECL)
11726 HOST_WIDE_INT bitpos = startbitpos;
11727 tree ftype = TREE_TYPE (f);
11728 machine_mode mode;
11729 if (ftype == error_mark_node)
11730 continue;
11731 mode = TYPE_MODE (ftype);
11733 if (DECL_SIZE (f) != 0
11734 && tree_fits_uhwi_p (bit_position (f)))
11735 bitpos += int_bit_position (f);
11737 /* ??? FIXME: else assume zero offset. */
11739 if (TREE_CODE (ftype) == RECORD_TYPE)
11740 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11741 else if (USE_FP_FOR_ARG_P (cum, mode))
11743 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11744 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11745 cum->fregno += n_fpregs;
11746 /* Single-precision floats present a special problem for
11747 us, because they are smaller than an 8-byte GPR, and so
11748 the structure-packing rules combined with the standard
11749 varargs behavior mean that we want to pack float/float
11750 and float/int combinations into a single register's
11751 space. This is complicated by the arg advance flushing,
11752 which works on arbitrarily large groups of int-type
11753 fields. */
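/* For example, with hypothetical field layouts:
   struct { float a; float b; } packs both fields into one GPR word,
   while in struct { int i; float f; } the flush above has already
   counted the word by the time the float in its second half is
   seen.  */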
11754 if (mode == SFmode)
11756 if (cum->floats_in_gpr == 1)
11758 /* Two floats in a word; count the word and reset
11759 the float count. */
11760 cum->words++;
11761 cum->floats_in_gpr = 0;
11763 else if (bitpos % 64 == 0)
11765 /* A float at the beginning of an 8-byte word;
11766 count it and put off adjusting cum->words until
11767 we see if an arg advance flush is going to do it
11768 for us. */
11769 cum->floats_in_gpr++;
11771 else
11773 /* The float is at the end of a word, preceded
11774 by integer fields, so the arg advance flush
11775 just above has already set cum->words and
11776 everything is taken care of. */
11779 else
11780 cum->words += n_fpregs;
11782 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11784 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11785 cum->vregno++;
11786 cum->words += 2;
11788 else if (cum->intoffset == -1)
11789 cum->intoffset = bitpos;
11793 /* Check for an item that needs to be considered specially under the darwin 64
11794 bit ABI. These are record types where the mode is BLKmode or the structure is
11795 8 bytes in size. */
11796 static int
11797 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11799 return rs6000_darwin64_abi
11800 && ((mode == BLKmode
11801 && TREE_CODE (type) == RECORD_TYPE
11802 && int_size_in_bytes (type) > 0)
11803 || (type && TREE_CODE (type) == RECORD_TYPE
11804 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11807 /* Update the data in CUM to advance over an argument
11808 of mode MODE and data type TYPE.
11809 (TYPE is null for libcalls where that information may not be available.)
11811 Note that for args passed by reference, function_arg will be called
11812 with MODE and TYPE set to that of the pointer to the arg, not the arg
11813 itself. */
11815 static void
11816 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11817 const_tree type, bool named, int depth)
11819 machine_mode elt_mode;
11820 int n_elts;
11822 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11824 /* Only tick off an argument if we're not recursing. */
11825 if (depth == 0)
11826 cum->nargs_prototype--;
11828 #ifdef HAVE_AS_GNU_ATTRIBUTE
11829 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11830 && cum->escapes)
11832 if (SCALAR_FLOAT_MODE_P (mode))
11834 rs6000_passes_float = true;
11835 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11836 && (FLOAT128_IBM_P (mode)
11837 || FLOAT128_IEEE_P (mode)
11838 || (type != NULL
11839 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11840 rs6000_passes_long_double = true;
11842 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11843 || (PAIRED_VECTOR_MODE (mode)
11844 && !cum->stdarg
11845 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11846 rs6000_passes_vector = true;
11848 #endif
11850 if (TARGET_ALTIVEC_ABI
11851 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11852 || (type && TREE_CODE (type) == VECTOR_TYPE
11853 && int_size_in_bytes (type) == 16)))
11855 bool stack = false;
11857 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11859 cum->vregno += n_elts;
11861 if (!TARGET_ALTIVEC)
11862 error ("cannot pass argument in vector register because"
11863 " altivec instructions are disabled, use %qs"
11864 " to enable them", "-maltivec");
11866 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11867 even if it is going to be passed in a vector register.
11868 Darwin does the same for variable-argument functions. */
11869 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11870 && TARGET_64BIT)
11871 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11872 stack = true;
11874 else
11875 stack = true;
11877 if (stack)
11879 int align;
11881 /* Vector parameters must be 16-byte aligned. In 32-bit
11882 mode this means we need to take into account the offset
11883 to the parameter save area. In 64-bit mode, they just
11884 have to start on an even word, since the parameter save
11885 area is 16-byte aligned. */
11886 if (TARGET_32BIT)
11887 align = -(rs6000_parm_offset () + cum->words) & 3;
11888 else
11889 align = cum->words & 1;
11890 cum->words += align + rs6000_arg_size (mode, type);
11892 if (TARGET_DEBUG_ARG)
11894 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11895 cum->words, align);
11896 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11897 cum->nargs_prototype, cum->prototype,
11898 GET_MODE_NAME (mode));
11902 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11904 int size = int_size_in_bytes (type);
11905 /* Variable sized types have size == -1 and are
11906 treated as if consisting entirely of ints.
11907 Pad to 16 byte boundary if needed. */
11908 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11909 && (cum->words % 2) != 0)
11910 cum->words++;
11911 /* For varargs, we can just go up by the size of the struct. */
11912 if (!named)
11913 cum->words += (size + 7) / 8;
11914 else
11916 /* It is tempting to say int register count just goes up by
11917 sizeof(type)/8, but this is wrong in a case such as
11918 { int; double; int; } [powerpc alignment]. We have to
11919 grovel through the fields for these too. */
11920 cum->intoffset = 0;
11921 cum->floats_in_gpr = 0;
11922 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11923 rs6000_darwin64_record_arg_advance_flush (cum,
11924 size * BITS_PER_UNIT, 1);
11926 if (TARGET_DEBUG_ARG)
11928 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11929 cum->words, TYPE_ALIGN (type), size);
11930 fprintf (stderr,
11931 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11932 cum->nargs_prototype, cum->prototype,
11933 GET_MODE_NAME (mode));
11936 else if (DEFAULT_ABI == ABI_V4)
11938 if (abi_v4_pass_in_fpr (mode, named))
11940 /* _Decimal128 must use an even/odd register pair. This assumes
11941 that the register number is odd when fregno is odd. */
11942 if (mode == TDmode && (cum->fregno % 2) == 1)
11943 cum->fregno++;
11945 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11946 <= FP_ARG_V4_MAX_REG)
11947 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11948 else
11950 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11951 if (mode == DFmode || FLOAT128_IBM_P (mode)
11952 || mode == DDmode || mode == TDmode)
11953 cum->words += cum->words & 1;
11954 cum->words += rs6000_arg_size (mode, type);
11957 else
11959 int n_words = rs6000_arg_size (mode, type);
11960 int gregno = cum->sysv_gregno;
11962 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11963 As does any other 2 word item such as complex int due to a
11964 historical mistake. */
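/* E.g. if gregno is currently r4, (1 - gregno) & 1 bumps it to r5
   so that the value lands in the (r5,r6) pair.  */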
11965 if (n_words == 2)
11966 gregno += (1 - gregno) & 1;
11968 /* Multi-reg args are not split between registers and stack. */
11969 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11971 /* Long long is aligned on the stack. So are other 2 word
11972 items such as complex int due to a historical mistake. */
11973 if (n_words == 2)
11974 cum->words += cum->words & 1;
11975 cum->words += n_words;
11978 /* Note: we continue to accumulate gregno even after we've started
11979 spilling to the stack; this is how expand_builtin_saveregs
11980 learns that spilling to the stack has begun. */
11981 cum->sysv_gregno = gregno + n_words;
11984 if (TARGET_DEBUG_ARG)
11986 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11987 cum->words, cum->fregno);
11988 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11989 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11990 fprintf (stderr, "mode = %4s, named = %d\n",
11991 GET_MODE_NAME (mode), named);
11994 else
11996 int n_words = rs6000_arg_size (mode, type);
11997 int start_words = cum->words;
11998 int align_words = rs6000_parm_start (mode, type, start_words);
12000 cum->words = align_words + n_words;
12002 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
12004 /* _Decimal128 must be passed in an even/odd float register pair.
12005 This assumes that the register number is odd when fregno is
12006 odd. */
12007 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12008 cum->fregno++;
12009 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12012 if (TARGET_DEBUG_ARG)
12014 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12015 cum->words, cum->fregno);
12016 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12017 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12018 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12019 named, align_words - start_words, depth);
12024 static void
12025 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12026 const_tree type, bool named)
12028 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12032 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12033 structure between cum->intoffset and bitpos to integer registers. */
12035 static void
12036 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12037 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12039 machine_mode mode;
12040 unsigned int regno;
12041 unsigned int startbit, endbit;
12042 int this_regno, intregs, intoffset;
12043 rtx reg;
12045 if (cum->intoffset == -1)
12046 return;
12048 intoffset = cum->intoffset;
12049 cum->intoffset = -1;
12051 /* If this is the trailing part of a word, try to only load that
12052 much into the register. Otherwise load the whole register. Note
12053 that in the latter case we may pick up unwanted bits. It's not a
12054 problem at the moment, but we may wish to revisit this. */
12056 if (intoffset % BITS_PER_WORD != 0)
12058 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12059 if (!int_mode_for_size (bits, 0).exists (&mode))
12061 /* We couldn't find an appropriate mode, which happens,
12062 e.g., in packed structs when there are 3 bytes to load.
12063 Move intoffset back to the beginning of the word in this
12064 case. */
12065 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12066 mode = word_mode;
12069 else
12070 mode = word_mode;
12072 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12073 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12074 intregs = (endbit - startbit) / BITS_PER_WORD;
12075 this_regno = cum->words + intoffset / BITS_PER_WORD;
12077 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12078 cum->use_stack = 1;
12080 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12081 if (intregs <= 0)
12082 return;
12084 intoffset /= BITS_PER_UNIT;
12087 regno = GP_ARG_MIN_REG + this_regno;
12088 reg = gen_rtx_REG (mode, regno);
12089 rvec[(*k)++] =
12090 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12092 this_regno += 1;
12093 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12094 mode = word_mode;
12095 intregs -= 1;
12097 while (intregs > 0);
12100 /* Recursive workhorse for the following. */
12102 static void
12103 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12104 HOST_WIDE_INT startbitpos, rtx rvec[],
12105 int *k)
12107 tree f;
12109 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12110 if (TREE_CODE (f) == FIELD_DECL)
12112 HOST_WIDE_INT bitpos = startbitpos;
12113 tree ftype = TREE_TYPE (f);
12114 machine_mode mode;
12115 if (ftype == error_mark_node)
12116 continue;
12117 mode = TYPE_MODE (ftype);
12119 if (DECL_SIZE (f) != 0
12120 && tree_fits_uhwi_p (bit_position (f)))
12121 bitpos += int_bit_position (f);
12123 /* ??? FIXME: else assume zero offset. */
12125 if (TREE_CODE (ftype) == RECORD_TYPE)
12126 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12127 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12129 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12130 #if 0
12131 switch (mode)
12133 case E_SCmode: mode = SFmode; break;
12134 case E_DCmode: mode = DFmode; break;
12135 case E_TCmode: mode = TFmode; break;
12136 default: break;
12138 #endif
12139 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12140 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12142 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12143 && (mode == TFmode || mode == TDmode));
12144 /* Long double or _Decimal128 split over regs and memory. */
12145 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12146 cum->use_stack = 1;
12148 rvec[(*k)++]
12149 = gen_rtx_EXPR_LIST (VOIDmode,
12150 gen_rtx_REG (mode, cum->fregno++),
12151 GEN_INT (bitpos / BITS_PER_UNIT));
12152 if (FLOAT128_2REG_P (mode))
12153 cum->fregno++;
12155 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12157 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12158 rvec[(*k)++]
12159 = gen_rtx_EXPR_LIST (VOIDmode,
12160 gen_rtx_REG (mode, cum->vregno++),
12161 GEN_INT (bitpos / BITS_PER_UNIT));
12163 else if (cum->intoffset == -1)
12164 cum->intoffset = bitpos;
12168 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12169 the register(s) to be used for each field and subfield of a struct
12170 being passed by value, along with the offset of where the
12171 register's value may be found in the block. FP fields go in FP
12172 register, vector fields go in vector registers, and everything
12173 else goes in int registers, packed as in memory.
12175 This code is also used for function return values. RETVAL indicates
12176 whether this is the case.
12178 Much of this is taken from the SPARC V9 port, which has a similar
12179 calling convention. */
12181 static rtx
12182 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12183 bool named, bool retval)
12185 rtx rvec[FIRST_PSEUDO_REGISTER];
12186 int k = 1, kbase = 1;
12187 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12188 /* This is a copy; modifications are not visible to our caller. */
12189 CUMULATIVE_ARGS copy_cum = *orig_cum;
12190 CUMULATIVE_ARGS *cum = &copy_cum;
12192 /* Pad to 16 byte boundary if needed. */
12193 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12194 && (cum->words % 2) != 0)
12195 cum->words++;
12197 cum->intoffset = 0;
12198 cum->use_stack = 0;
12199 cum->named = named;
12201 /* Put entries into rvec[] for individual FP and vector fields, and
12202 for the chunks of memory that go in int regs. Note we start at
12203 element 1; 0 is reserved for an indication of using memory, and
12204 may or may not be filled in below. */
12205 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12206 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12208 /* If any part of the struct went on the stack put all of it there.
12209 This hack is because the generic code for
12210 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12211 parts of the struct are not at the beginning. */
12212 if (cum->use_stack)
12214 if (retval)
12215 return NULL_RTX; /* doesn't go in registers at all */
12216 kbase = 0;
12217 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12219 if (k > 1 || cum->use_stack)
12220 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12221 else
12222 return NULL_RTX;
12225 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12227 static rtx
12228 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12229 int align_words)
12231 int n_units;
12232 int i, k;
12233 rtx rvec[GP_ARG_NUM_REG + 1];
12235 if (align_words >= GP_ARG_NUM_REG)
12236 return NULL_RTX;
12238 n_units = rs6000_arg_size (mode, type);
12240 /* Optimize the simple case where the arg fits in one gpr, except in
12241 the case of BLKmode due to assign_parms assuming that registers are
12242 BITS_PER_WORD wide. */
12243 if (n_units == 0
12244 || (n_units == 1 && mode != BLKmode))
12245 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12247 k = 0;
12248 if (align_words + n_units > GP_ARG_NUM_REG)
12249 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12250 using a magic NULL_RTX component.
12251 This is not strictly correct. Only some of the arg belongs in
12252 memory, not all of it. However, the normal scheme using
12253 function_arg_partial_nregs can result in unusual subregs, e.g.
12254 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12255 store the whole arg to memory is often more efficient than code
12256 to store pieces, and we know that space is available in the right
12257 place for the whole arg. */
12258 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12260 i = 0;
12263 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12264 rtx off = GEN_INT (i++ * 4);
12265 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12267 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12269 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12272 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12273 but must also be copied into the parameter save area starting at
12274 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12275 to the GPRs and/or memory. Return the number of elements used. */
12277 static int
12278 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12279 int align_words, rtx *rvec)
12281 int k = 0;
12283 if (align_words < GP_ARG_NUM_REG)
12285 int n_words = rs6000_arg_size (mode, type);
12287 if (align_words + n_words > GP_ARG_NUM_REG
12288 || mode == BLKmode
12289 || (TARGET_32BIT && TARGET_POWERPC64))
12291 /* If this is partially on the stack, then we only
12292 include the portion actually in registers here. */
12293 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12294 int i = 0;
12296 if (align_words + n_words > GP_ARG_NUM_REG)
12298 /* Not all of the arg fits in gprs. Say that it goes in memory
12299 too, using a magic NULL_RTX component. Also see comment in
12300 rs6000_mixed_function_arg for why the normal
12301 function_arg_partial_nregs scheme doesn't work in this case. */
12302 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12307 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12308 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12309 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12311 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12313 else
12315 /* The whole arg fits in gprs. */
12316 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12317 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12320 else
12322 /* It's entirely in memory. */
12323 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12326 return k;
12329 /* RVEC is a vector of K components of an argument of mode MODE.
12330 Construct the final function_arg return value from it. */
12332 static rtx
12333 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12335 gcc_assert (k >= 1);
12337 /* Avoid returning a PARALLEL in the trivial cases. */
12338 if (k == 1)
12340 if (XEXP (rvec[0], 0) == NULL_RTX)
12341 return NULL_RTX;
12343 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12344 return XEXP (rvec[0], 0);
12347 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12350 /* Determine where to put an argument to a function.
12351 Value is zero to push the argument on the stack,
12352 or a hard register in which to store the argument.
12354 MODE is the argument's machine mode.
12355 TYPE is the data type of the argument (as a tree).
12356 This is null for libcalls where that information may
12357 not be available.
12358 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12359 the preceding args and about the function being called. It is
12360 not modified in this routine.
12361 NAMED is nonzero if this argument is a named parameter
12362 (otherwise it is an extra parameter matching an ellipsis).
12364 On RS/6000 the first eight words of non-FP are normally in registers
12365 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12366 Under V.4, the first 8 FP args are in registers.
12368 If this is floating-point and no prototype is specified, we use
12369 both an FP and integer register (or possibly FP reg and stack). Library
12370 functions (when CALL_LIBCALL is set) always have the proper types for args,
12371 so we can pass the FP value just in one register. emit_library_function
12372 doesn't support PARALLEL anyway.
12374 Note that for args passed by reference, function_arg will be called
12375 with MODE and TYPE set to that of the pointer to the arg, not the arg
12376 itself. */
12378 static rtx
12379 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12380 const_tree type, bool named)
12382 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12383 enum rs6000_abi abi = DEFAULT_ABI;
12384 machine_mode elt_mode;
12385 int n_elts;
12387 /* Return a marker indicating whether the CR1 bit that V.4 uses to
12388 say fp args were passed in registers needs to be set or cleared.
12389 Assume that we don't need the marker for software floating point,
12390 or compiler generated library calls. */
12391 if (mode == VOIDmode)
12393 if (abi == ABI_V4
12394 && (cum->call_cookie & CALL_LIBCALL) == 0
12395 && (cum->stdarg
12396 || (cum->nargs_prototype < 0
12397 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12398 && TARGET_HARD_FLOAT)
12399 return GEN_INT (cum->call_cookie
12400 | ((cum->fregno == FP_ARG_MIN_REG)
12401 ? CALL_V4_SET_FP_ARGS
12402 : CALL_V4_CLEAR_FP_ARGS));
12404 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12407 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12409 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12411 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12412 if (rslt != NULL_RTX)
12413 return rslt;
12414 /* Else fall through to usual handling. */
12417 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12419 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12420 rtx r, off;
12421 int i, k = 0;
12423 /* Do we also need to pass this argument in the parameter save area?
12424 Library support functions for IEEE 128-bit are assumed to not need the
12425 value passed both in GPRs and in vector registers. */
12426 if (TARGET_64BIT && !cum->prototype
12427 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12429 int align_words = ROUND_UP (cum->words, 2);
12430 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12433 /* Describe where this argument goes in the vector registers. */
12434 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12436 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12437 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12438 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12441 return rs6000_finish_function_arg (mode, rvec, k);
12443 else if (TARGET_ALTIVEC_ABI
12444 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12445 || (type && TREE_CODE (type) == VECTOR_TYPE
12446 && int_size_in_bytes (type) == 16)))
12448 if (named || abi == ABI_V4)
12449 return NULL_RTX;
12450 else
12452 /* Vector parameters to varargs functions under AIX or Darwin
12453 get passed in memory and possibly also in GPRs. */
12454 int align, align_words, n_words;
12455 machine_mode part_mode;
12457 /* Vector parameters must be 16-byte aligned. In 32-bit
12458 mode this means we need to take into account the offset
12459 to the parameter save area. In 64-bit mode, they just
12460 have to start on an even word, since the parameter save
12461 area is 16-byte aligned. */
12462 if (TARGET_32BIT)
12463 align = -(rs6000_parm_offset () + cum->words) & 3;
12464 else
12465 align = cum->words & 1;
12466 align_words = cum->words + align;
12468 /* Out of registers? Memory, then. */
12469 if (align_words >= GP_ARG_NUM_REG)
12470 return NULL_RTX;
12472 if (TARGET_32BIT && TARGET_POWERPC64)
12473 return rs6000_mixed_function_arg (mode, type, align_words);
12475 /* The vector value goes in GPRs. Only the part of the
12476 value in GPRs is reported here. */
12477 part_mode = mode;
12478 n_words = rs6000_arg_size (mode, type);
12479 if (align_words + n_words > GP_ARG_NUM_REG)
12480 /* Fortunately, there are only two possibilities: the value
12481 is either wholly in GPRs or half in GPRs and half not. */
12482 part_mode = DImode;
12484 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12488 else if (abi == ABI_V4)
12490 if (abi_v4_pass_in_fpr (mode, named))
12492 /* _Decimal128 must use an even/odd register pair. This assumes
12493 that the register number is odd when fregno is odd. */
12494 if (mode == TDmode && (cum->fregno % 2) == 1)
12495 cum->fregno++;
12497 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12498 <= FP_ARG_V4_MAX_REG)
12499 return gen_rtx_REG (mode, cum->fregno);
12500 else
12501 return NULL_RTX;
12503 else
12505 int n_words = rs6000_arg_size (mode, type);
12506 int gregno = cum->sysv_gregno;
12508 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12509 As does any other 2 word item such as complex int due to a
12510 historical mistake. */
12511 if (n_words == 2)
12512 gregno += (1 - gregno) & 1;
12514 /* Multi-reg args are not split between registers and stack. */
12515 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12516 return NULL_RTX;
12518 if (TARGET_32BIT && TARGET_POWERPC64)
12519 return rs6000_mixed_function_arg (mode, type,
12520 gregno - GP_ARG_MIN_REG);
12521 return gen_rtx_REG (mode, gregno);
12524 else
12526 int align_words = rs6000_parm_start (mode, type, cum->words);
12528 /* _Decimal128 must be passed in an even/odd float register pair.
12529 This assumes that the register number is odd when fregno is odd. */
12530 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12531 cum->fregno++;
12533 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12535 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12536 rtx r, off;
12537 int i, k = 0;
12538 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12539 int fpr_words;
12541 /* Do we also need to pass this argument in the parameter
12542 save area? */
12543 if (type && (cum->nargs_prototype <= 0
12544 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12545 && TARGET_XL_COMPAT
12546 && align_words >= GP_ARG_NUM_REG)))
12547 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12549 /* Describe where this argument goes in the fprs. */
12550 for (i = 0; i < n_elts
12551 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12553 /* Check if the argument is split over registers and memory.
12554 This can only ever happen for long double or _Decimal128;
12555 complex types are handled via split_complex_arg. */
12556 machine_mode fmode = elt_mode;
12557 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12559 gcc_assert (FLOAT128_2REG_P (fmode));
12560 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12563 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12564 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12565 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12568 /* If there were not enough FPRs to hold the argument, the rest
12569 usually goes into memory. However, if the current position
12570 is still within the register parameter area, a portion may
12571 actually have to go into GPRs.
12573 Note that it may happen that the portion of the argument
12574 passed in the first "half" of the first GPR was already
12575 passed in the last FPR as well.
12577 For unnamed arguments, we already set up GPRs to cover the
12578 whole argument in rs6000_psave_function_arg, so there is
12579 nothing further to do at this point. */
12580 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12581 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12582 && cum->nargs_prototype > 0)
12584 static bool warned;
12586 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12587 int n_words = rs6000_arg_size (mode, type);
12589 align_words += fpr_words;
12590 n_words -= fpr_words;
12594 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12595 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12596 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12598 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12600 if (!warned && warn_psabi)
12602 warned = true;
12603 inform (input_location,
12604 "the ABI of passing homogeneous float aggregates"
12605 " has changed in GCC 5");
12609 return rs6000_finish_function_arg (mode, rvec, k);
12611 else if (align_words < GP_ARG_NUM_REG)
12613 if (TARGET_32BIT && TARGET_POWERPC64)
12614 return rs6000_mixed_function_arg (mode, type, align_words);
12616 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12618 else
12619 return NULL_RTX;
12623 /* For an arg passed partly in registers and partly in memory, this is
12624 the number of bytes passed in registers. For args passed entirely in
12625 registers or entirely in memory, zero. When an arg is described by a
12626 PARALLEL, perhaps using more than one register type, this function
12627 returns the number of bytes used by the first element of the PARALLEL. */
12629 static int
12630 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12631 tree type, bool named)
12633 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12634 bool passed_in_gprs = true;
12635 int ret = 0;
12636 int align_words;
12637 machine_mode elt_mode;
12638 int n_elts;
12640 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12642 if (DEFAULT_ABI == ABI_V4)
12643 return 0;
12645 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12647 /* If we are passing this arg in the fixed parameter save area (gprs or
12648 memory) as well as VRs, we do not use the partial bytes mechanism;
12649 instead, rs6000_function_arg will return a PARALLEL including a memory
12650 element as necessary. Library support functions for IEEE 128-bit are
12651 assumed to not need the value passed both in GPRs and in vector
12652 registers. */
12653 if (TARGET_64BIT && !cum->prototype
12654 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12655 return 0;
12657 /* Otherwise, we pass in VRs only. Check for partial copies. */
12658 passed_in_gprs = false;
12659 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12660 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12663 /* In this complicated case we just disable the partial_nregs code. */
12664 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12665 return 0;
12667 align_words = rs6000_parm_start (mode, type, cum->words);
12669 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12671 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12673 /* If we are passing this arg in the fixed parameter save area
12674 (gprs or memory) as well as FPRs, we do not use the partial
12675 bytes mechanism; instead, rs6000_function_arg will return a
12676 PARALLEL including a memory element as necessary. */
12677 if (type
12678 && (cum->nargs_prototype <= 0
12679 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12680 && TARGET_XL_COMPAT
12681 && align_words >= GP_ARG_NUM_REG)))
12682 return 0;
12684 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12685 passed_in_gprs = false;
12686 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12688 /* Compute number of bytes / words passed in FPRs. If there
12689 is still space available in the register parameter area
12690 *after* that amount, a part of the argument will be passed
12691 in GPRs. In that case, the total amount passed in any
12692 registers is equal to the amount that would have been passed
12693 in GPRs if everything were passed there, so we fall back to
12694 the GPR code below to compute the appropriate value. */
12695 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12696 * MIN (8, GET_MODE_SIZE (elt_mode)));
12697 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12699 if (align_words + fpr_words < GP_ARG_NUM_REG)
12700 passed_in_gprs = true;
12701 else
12702 ret = fpr;
12706 if (passed_in_gprs
12707 && align_words < GP_ARG_NUM_REG
12708 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12709 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12711 if (ret != 0 && TARGET_DEBUG_ARG)
12712 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12714 return ret;
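/* Illustrative sketch, not part of the original sources: with the 8
   parameter GPRs of the 64-bit AIX/ELFv2 ABIs, a 32-byte aggregate that
   starts at the 7th GPR slot only gets 2 registers, so this function
   would return 16 and the remaining 16 bytes arrive in memory.  */
#if 0
struct big { char c[32]; };
void callee (long a, long b, long c, long d, long e, long f,
             struct big g);   /* g: 16 bytes in r9/r10, 16 on the stack.  */
#endif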
12717 /* A C expression that indicates when an argument must be passed by
12718 reference. If nonzero for an argument, a copy of that argument is
12719 made in memory and a pointer to the argument is passed instead of
12720 the argument itself. The pointer is passed in whatever way is
12721 appropriate for passing a pointer to that type.
12723 Under V.4, aggregates and long double are passed by reference.
12725 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12726 reference unless the AltiVec vector extension ABI is in force.
12728 As an extension to all ABIs, variable sized types are passed by
12729 reference. */
12731 static bool
12732 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12733 machine_mode mode, const_tree type,
12734 bool named ATTRIBUTE_UNUSED)
12736 if (!type)
12737 return 0;
12739 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12740 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12742 if (TARGET_DEBUG_ARG)
12743 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12744 return 1;
12747 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12749 if (TARGET_DEBUG_ARG)
12750 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12751 return 1;
12754 if (int_size_in_bytes (type) < 0)
12756 if (TARGET_DEBUG_ARG)
12757 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12758 return 1;
12761 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12762 modes only exist for GCC vector types if -maltivec. */
12763 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12765 if (TARGET_DEBUG_ARG)
12766 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12767 return 1;
12770 /* Pass synthetic vectors in memory. */
12771 if (TREE_CODE (type) == VECTOR_TYPE
12772 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12774 static bool warned_for_pass_big_vectors = false;
12775 if (TARGET_DEBUG_ARG)
12776 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12777 if (!warned_for_pass_big_vectors)
12779 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12780 "non-standard ABI extension with no compatibility "
12781 "guarantee");
12782 warned_for_pass_big_vectors = true;
12784 return 1;
12787 return 0;
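/* Illustrative sketch, not part of the original sources: under the V.4
   (32-bit SVR4) ABI the aggregate below is not copied into registers;
   the caller builds a temporary copy and passes its address, so the
   callee effectively receives the second signature.  */
#if 0
struct point { int x, y; };
void draw (struct point p);        /* what the user wrote */
void draw (struct point *p_copy);  /* roughly how V.4 passes it */
#endif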
12790 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12791 already processed. Return true if the parameter must be passed
12792 (fully or partially) on the stack. */
12794 static bool
12795 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12797 machine_mode mode;
12798 int unsignedp;
12799 rtx entry_parm;
12801 /* Catch errors. */
12802 if (type == NULL || type == error_mark_node)
12803 return true;
12805 /* Handle types with no storage requirement. */
12806 if (TYPE_MODE (type) == VOIDmode)
12807 return false;
12809 /* Handle complex types: the real and imaginary parts are checked (and ARGS_SO_FAR advanced) separately, hence the two identical calls. */
12810 if (TREE_CODE (type) == COMPLEX_TYPE)
12811 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12812 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12814 /* Handle transparent aggregates. */
12815 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12816 && TYPE_TRANSPARENT_AGGR (type))
12817 type = TREE_TYPE (first_field (type));
12819 /* See if this arg was passed by invisible reference. */
12820 if (pass_by_reference (get_cumulative_args (args_so_far),
12821 TYPE_MODE (type), type, true))
12822 type = build_pointer_type (type);
12824 /* Find mode as it is passed by the ABI. */
12825 unsignedp = TYPE_UNSIGNED (type);
12826 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12828 /* If we must pass in stack, we need a stack. */
12829 if (rs6000_must_pass_in_stack (mode, type))
12830 return true;
12832 /* If there is no incoming register, we need a stack. */
12833 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12834 if (entry_parm == NULL)
12835 return true;
12837 /* Likewise if we need to pass both in registers and on the stack. */
12838 if (GET_CODE (entry_parm) == PARALLEL
12839 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12840 return true;
12842 /* Also true if we're partially in registers and partially not. */
12843 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12844 return true;
12846 /* Update info on where next arg arrives in registers. */
12847 rs6000_function_arg_advance (args_so_far, mode, type, true);
12848 return false;
12851 /* Return true if FUN has no prototype, has a variable argument
12852 list, or passes any parameter in memory. */
12854 static bool
12855 rs6000_function_parms_need_stack (tree fun, bool incoming)
12857 tree fntype, result;
12858 CUMULATIVE_ARGS args_so_far_v;
12859 cumulative_args_t args_so_far;
12861 if (!fun)
12862 /* Must be a libcall; all libcalls use only reg parms. */
12863 return false;
12865 fntype = fun;
12866 if (!TYPE_P (fun))
12867 fntype = TREE_TYPE (fun);
12869 /* Varargs functions need the parameter save area. */
12870 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12871 return true;
12873 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12874 args_so_far = pack_cumulative_args (&args_so_far_v);
12876 /* When incoming, we will have been passed the function decl.
12877 It is necessary to use the decl to handle K&R style functions,
12878 where TYPE_ARG_TYPES may not be available. */
12879 if (incoming)
12881 gcc_assert (DECL_P (fun));
12882 result = DECL_RESULT (fun);
12884 else
12885 result = TREE_TYPE (fntype);
12887 if (result && aggregate_value_p (result, fntype))
12889 if (!TYPE_P (result))
12890 result = TREE_TYPE (result);
12891 result = build_pointer_type (result);
12892 rs6000_parm_needs_stack (args_so_far, result);
12895 if (incoming)
12897 tree parm;
12899 for (parm = DECL_ARGUMENTS (fun);
12900 parm && parm != void_list_node;
12901 parm = TREE_CHAIN (parm))
12902 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12903 return true;
12905 else
12907 function_args_iterator args_iter;
12908 tree arg_type;
12910 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12911 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12912 return true;
12915 return false;
12918 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12919 usually a constant depending on the ABI. However, in the ELFv2 ABI
12920 the register parameter area is optional when calling a function that
12921 has a prototype in scope, has no variable argument list, and passes
12922 all parameters in registers. */
12924 int
12925 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12927 int reg_parm_stack_space;
12929 switch (DEFAULT_ABI)
12931 default:
12932 reg_parm_stack_space = 0;
12933 break;
12935 case ABI_AIX:
12936 case ABI_DARWIN:
12937 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12938 break;
12940 case ABI_ELFv2:
12941 /* ??? Recomputing this every time is a bit expensive. Is there
12942 a place to cache this information? */
12943 if (rs6000_function_parms_need_stack (fun, incoming))
12944 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12945 else
12946 reg_parm_stack_space = 0;
12947 break;
12950 return reg_parm_stack_space;
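/* Illustrative sketch, not part of the original sources: on a 64-bit
   ELFv2 target, a prototyped function whose arguments all travel in
   registers needs no parameter save area, while a variadic one always
   reserves the full 64 bytes (8 GPRs of 8 bytes each).  */
#if 0
extern int all_in_regs (int a, double b);       /* 0 bytes reserved */
extern int variadic (const char *fmt, ...);     /* 64 bytes reserved */
#endif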
12953 static void
12954 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12956 int i;
12957 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12959 if (nregs == 0)
12960 return;
12962 for (i = 0; i < nregs; i++)
12964 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12965 if (reload_completed)
12967 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12968 tem = NULL_RTX;
12969 else
12970 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12971 i * GET_MODE_SIZE (reg_mode));
12973 else
12974 tem = replace_equiv_address (tem, XEXP (tem, 0));
12976 gcc_assert (tem);
12978 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
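/* Illustrative sketch, not part of the original sources: a call such as
     rs6000_move_block_from_reg (GP_ARG_MIN_REG, mem, 3);
   stores three consecutive GPRs into three consecutive reg-sized slots
   of MEM (4-byte slots on 32-bit targets, 8-byte slots on 64-bit).  */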
12982 /* Perform any actions needed for a function that is receiving a
12983 variable number of arguments.
12985 CUM is as above.
12987 MODE and TYPE are the mode and type of the current parameter.
12989 PRETEND_SIZE is a variable that should be set to the amount of stack
12990 that must be pushed by the prolog to pretend that our caller pushed
12991 it.
12993 Normally, this macro will push all remaining incoming registers on the
12994 stack and set PRETEND_SIZE to the length of the registers pushed. */
12996 static void
12997 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12998 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12999 int no_rtl)
13001 CUMULATIVE_ARGS next_cum;
13002 int reg_size = TARGET_32BIT ? 4 : 8;
13003 rtx save_area = NULL_RTX, mem;
13004 int first_reg_offset;
13005 alias_set_type set;
13007 /* Skip the last named argument. */
13008 next_cum = *get_cumulative_args (cum);
13009 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13011 if (DEFAULT_ABI == ABI_V4)
13013 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13015 if (! no_rtl)
13017 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13018 HOST_WIDE_INT offset = 0;
13020 /* Try to optimize the size of the varargs save area.
13021 The ABI requires that ap.reg_save_area is doubleword
13022 aligned, but we don't need to allocate space for all
13023 the bytes, only for those into which we will actually
13024 save anything. */
13025 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13026 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13027 if (TARGET_HARD_FLOAT
13028 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13029 && cfun->va_list_fpr_size)
13031 if (gpr_reg_num)
13032 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13033 * UNITS_PER_FP_WORD;
13034 if (cfun->va_list_fpr_size
13035 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13036 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13037 else
13038 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13039 * UNITS_PER_FP_WORD;
13041 if (gpr_reg_num)
13043 offset = -((first_reg_offset * reg_size) & ~7);
13044 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13046 gpr_reg_num = cfun->va_list_gpr_size;
13047 if (reg_size == 4 && (first_reg_offset & 1))
13048 gpr_reg_num++;
13050 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13052 else if (fpr_size)
13053 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13054 * UNITS_PER_FP_WORD
13055 - (int) (GP_ARG_NUM_REG * reg_size);
13057 if (gpr_size + fpr_size)
13059 rtx reg_save_area
13060 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13061 gcc_assert (GET_CODE (reg_save_area) == MEM);
13062 reg_save_area = XEXP (reg_save_area, 0);
13063 if (GET_CODE (reg_save_area) == PLUS)
13065 gcc_assert (XEXP (reg_save_area, 0)
13066 == virtual_stack_vars_rtx);
13067 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13068 offset += INTVAL (XEXP (reg_save_area, 1));
13070 else
13071 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13074 cfun->machine->varargs_save_offset = offset;
13075 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13078 else
13080 first_reg_offset = next_cum.words;
13081 save_area = crtl->args.internal_arg_pointer;
13083 if (targetm.calls.must_pass_in_stack (mode, type))
13084 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13087 set = get_varargs_alias_set ();
13088 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13089 && cfun->va_list_gpr_size)
13091 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13093 if (va_list_gpr_counter_field)
13094 /* V4 va_list_gpr_size counts number of registers needed. */
13095 n_gpr = cfun->va_list_gpr_size;
13096 else
13097 /* char * va_list instead counts number of bytes needed. */
13098 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13100 if (nregs > n_gpr)
13101 nregs = n_gpr;
13103 mem = gen_rtx_MEM (BLKmode,
13104 plus_constant (Pmode, save_area,
13105 first_reg_offset * reg_size));
13106 MEM_NOTRAP_P (mem) = 1;
13107 set_mem_alias_set (mem, set);
13108 set_mem_align (mem, BITS_PER_WORD);
13110 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13111 nregs);
13114 /* Save FP registers if needed. */
13115 if (DEFAULT_ABI == ABI_V4
13116 && TARGET_HARD_FLOAT
13117 && ! no_rtl
13118 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13119 && cfun->va_list_fpr_size)
13121 int fregno = next_cum.fregno, nregs;
13122 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13123 rtx lab = gen_label_rtx ();
13124 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13125 * UNITS_PER_FP_WORD);
13127 emit_jump_insn
13128 (gen_rtx_SET (pc_rtx,
13129 gen_rtx_IF_THEN_ELSE (VOIDmode,
13130 gen_rtx_NE (VOIDmode, cr1,
13131 const0_rtx),
13132 gen_rtx_LABEL_REF (VOIDmode, lab),
13133 pc_rtx)));
13135 for (nregs = 0;
13136 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13137 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13139 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13140 ? DFmode : SFmode,
13141 plus_constant (Pmode, save_area, off));
13142 MEM_NOTRAP_P (mem) = 1;
13143 set_mem_alias_set (mem, set);
13144 set_mem_align (mem, GET_MODE_ALIGNMENT (
13145 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13146 ? DFmode : SFmode));
13147 emit_move_insn (mem, gen_rtx_REG (
13148 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13149 ? DFmode : SFmode, fregno));
13152 emit_label (lab);
13156 /* Create the va_list data type. */
13158 static tree
13159 rs6000_build_builtin_va_list (void)
13161 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13163 /* For AIX, prefer 'char *' because that's what the system
13164 header files like. */
13165 if (DEFAULT_ABI != ABI_V4)
13166 return build_pointer_type (char_type_node);
13168 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13169 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13170 get_identifier ("__va_list_tag"), record);
13172 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13173 unsigned_char_type_node);
13174 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13175 unsigned_char_type_node);
13176 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13177 every user file. */
13178 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13179 get_identifier ("reserved"), short_unsigned_type_node);
13180 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13181 get_identifier ("overflow_arg_area"),
13182 ptr_type_node);
13183 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13184 get_identifier ("reg_save_area"),
13185 ptr_type_node);
13187 va_list_gpr_counter_field = f_gpr;
13188 va_list_fpr_counter_field = f_fpr;
13190 DECL_FIELD_CONTEXT (f_gpr) = record;
13191 DECL_FIELD_CONTEXT (f_fpr) = record;
13192 DECL_FIELD_CONTEXT (f_res) = record;
13193 DECL_FIELD_CONTEXT (f_ovf) = record;
13194 DECL_FIELD_CONTEXT (f_sav) = record;
13196 TYPE_STUB_DECL (record) = type_decl;
13197 TYPE_NAME (record) = type_decl;
13198 TYPE_FIELDS (record) = f_gpr;
13199 DECL_CHAIN (f_gpr) = f_fpr;
13200 DECL_CHAIN (f_fpr) = f_res;
13201 DECL_CHAIN (f_res) = f_ovf;
13202 DECL_CHAIN (f_ovf) = f_sav;
13204 layout_type (record);
13206 /* The correct type is an array type of one element. */
13207 return build_array_type (record, build_index_type (size_zero_node));
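/* Illustrative sketch, not part of the original sources: the record
   built above corresponds to the V.4 va_list a user could declare by
   hand (field names match the FIELD_DECLs created above).  */
#if 0
typedef struct __va_list_tag
{
  unsigned char gpr;            /* next saved GPR to use (0..8) */
  unsigned char fpr;            /* next saved FPR to use (0..8) */
  unsigned short reserved;      /* padding, named to silence -Wpadded */
  void *overflow_arg_area;      /* arguments that arrived on the stack */
  void *reg_save_area;          /* block the prologue saved the regs to */
} __gnuc_va_list[1];            /* array of one element, as returned here */
#endif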
13210 /* Implement va_start. */
13212 static void
13213 rs6000_va_start (tree valist, rtx nextarg)
13215 HOST_WIDE_INT words, n_gpr, n_fpr;
13216 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13217 tree gpr, fpr, ovf, sav, t;
13219 /* Only SVR4 needs something special. */
13220 if (DEFAULT_ABI != ABI_V4)
13222 std_expand_builtin_va_start (valist, nextarg);
13223 return;
13226 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13227 f_fpr = DECL_CHAIN (f_gpr);
13228 f_res = DECL_CHAIN (f_fpr);
13229 f_ovf = DECL_CHAIN (f_res);
13230 f_sav = DECL_CHAIN (f_ovf);
13232 valist = build_simple_mem_ref (valist);
13233 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13234 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13235 f_fpr, NULL_TREE);
13236 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13237 f_ovf, NULL_TREE);
13238 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13239 f_sav, NULL_TREE);
13241 /* Count number of gp and fp argument registers used. */
13242 words = crtl->args.info.words;
13243 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13244 GP_ARG_NUM_REG);
13245 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13246 FP_ARG_NUM_REG);
13248 if (TARGET_DEBUG_ARG)
13249 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13250 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13251 words, n_gpr, n_fpr);
13253 if (cfun->va_list_gpr_size)
13255 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13256 build_int_cst (NULL_TREE, n_gpr));
13257 TREE_SIDE_EFFECTS (t) = 1;
13258 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13261 if (cfun->va_list_fpr_size)
13263 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13264 build_int_cst (NULL_TREE, n_fpr));
13265 TREE_SIDE_EFFECTS (t) = 1;
13266 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13268 #ifdef HAVE_AS_GNU_ATTRIBUTE
13269 if (call_ABI_of_interest (cfun->decl))
13270 rs6000_passes_float = true;
13271 #endif
13274 /* Find the overflow area. */
13275 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13276 if (words != 0)
13277 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13278 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13279 TREE_SIDE_EFFECTS (t) = 1;
13280 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13282 /* If there were no va_arg invocations, don't set up the register
13283 save area. */
13284 if (!cfun->va_list_gpr_size
13285 && !cfun->va_list_fpr_size
13286 && n_gpr < GP_ARG_NUM_REG
13287 && n_fpr < FP_ARG_V4_MAX_REG)
13288 return;
13290 /* Find the register save area. */
13291 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13292 if (cfun->machine->varargs_save_offset)
13293 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13294 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13295 TREE_SIDE_EFFECTS (t) = 1;
13296 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
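/* Illustrative sketch, not part of the original sources: for the V.4
   prototype below, the named arguments consume r3, r4 and f1, so
   va_start initializes gpr = 2 and fpr = 1 and points overflow_arg_area
   just past any stack-passed words.  */
#if 0
void logmsg (int level, long tag, double scale, ...);
#endif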
13299 /* Implement va_arg. */
13301 static tree
13302 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13303 gimple_seq *post_p)
13305 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13306 tree gpr, fpr, ovf, sav, reg, t, u;
13307 int size, rsize, n_reg, sav_ofs, sav_scale;
13308 tree lab_false, lab_over, addr;
13309 int align;
13310 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13311 int regalign = 0;
13312 gimple *stmt;
13314 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13316 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13317 return build_va_arg_indirect_ref (t);
13320 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13321 earlier version of gcc, with the property that it always applied alignment
13322 adjustments to the va-args (even for zero-sized types). The cheapest way
13323 to deal with this is to replicate the effect of the part of
13324 std_gimplify_va_arg_expr that carries out the align adjust, for the
13325 relevant case.
13326 We don't need to check for pass-by-reference because of the test above.
13327 We can return a simplified answer, since we know there's no offset to add. */
13329 if (((TARGET_MACHO
13330 && rs6000_darwin64_abi)
13331 || DEFAULT_ABI == ABI_ELFv2
13332 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13333 && integer_zerop (TYPE_SIZE (type)))
13335 unsigned HOST_WIDE_INT align, boundary;
13336 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13337 align = PARM_BOUNDARY / BITS_PER_UNIT;
13338 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13339 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13340 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13341 boundary /= BITS_PER_UNIT;
13342 if (boundary > align)
13344 tree t;
13345 /* This updates arg ptr by the amount that would be necessary
13346 to align the zero-sized (but not zero-alignment) item. */
13347 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13348 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13349 gimplify_and_add (t, pre_p);
13351 t = fold_convert (sizetype, valist_tmp);
13352 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13353 fold_convert (TREE_TYPE (valist),
13354 fold_build2 (BIT_AND_EXPR, sizetype, t,
13355 size_int (-boundary))));
13356 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13357 gimplify_and_add (t, pre_p);
13359 /* Since it is zero-sized there's no increment for the item itself. */
13360 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13361 return build_va_arg_indirect_ref (valist_tmp);
13364 if (DEFAULT_ABI != ABI_V4)
13366 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13368 tree elem_type = TREE_TYPE (type);
13369 machine_mode elem_mode = TYPE_MODE (elem_type);
13370 int elem_size = GET_MODE_SIZE (elem_mode);
13372 if (elem_size < UNITS_PER_WORD)
13374 tree real_part, imag_part;
13375 gimple_seq post = NULL;
13377 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13378 &post);
13379 /* Copy the value into a temporary, lest the formal temporary
13380 be reused out from under us. */
13381 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13382 gimple_seq_add_seq (pre_p, post);
13384 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13385 post_p);
13387 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13391 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13394 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13395 f_fpr = DECL_CHAIN (f_gpr);
13396 f_res = DECL_CHAIN (f_fpr);
13397 f_ovf = DECL_CHAIN (f_res);
13398 f_sav = DECL_CHAIN (f_ovf);
13400 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13401 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13402 f_fpr, NULL_TREE);
13403 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13404 f_ovf, NULL_TREE);
13405 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13406 f_sav, NULL_TREE);
13408 size = int_size_in_bytes (type);
13409 rsize = (size + 3) / 4;
13410 int pad = 4 * rsize - size;
13411 align = 1;
13413 machine_mode mode = TYPE_MODE (type);
13414 if (abi_v4_pass_in_fpr (mode, false))
13416 /* FP args go in FP registers, if present. */
13417 reg = fpr;
13418 n_reg = (size + 7) / 8;
13419 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13420 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13421 if (mode != SFmode && mode != SDmode)
13422 align = 8;
13424 else
13426 /* Otherwise into GP registers. */
13427 reg = gpr;
13428 n_reg = rsize;
13429 sav_ofs = 0;
13430 sav_scale = 4;
13431 if (n_reg == 2)
13432 align = 8;
13435 /* Pull the value out of the saved registers.... */
13437 lab_over = NULL;
13438 addr = create_tmp_var (ptr_type_node, "addr");
13440 /* AltiVec vectors never go in registers when -mabi=altivec. */
13441 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13442 align = 16;
13443 else
13445 lab_false = create_artificial_label (input_location);
13446 lab_over = create_artificial_label (input_location);
13448 /* Long long is aligned in the registers. As is any other 2-GPR
13449 item, such as complex int, due to a historical mistake. */
13450 u = reg;
13451 if (n_reg == 2 && reg == gpr)
13453 regalign = 1;
13454 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13455 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13456 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13457 unshare_expr (reg), u);
13459 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13460 reg number is 0 for f1, so we want to make it odd. */
13461 else if (reg == fpr && mode == TDmode)
13463 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13464 build_int_cst (TREE_TYPE (reg), 1));
13465 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13468 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13469 t = build2 (GE_EXPR, boolean_type_node, u, t);
13470 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13471 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13472 gimplify_and_add (t, pre_p);
13474 t = sav;
13475 if (sav_ofs)
13476 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13478 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13479 build_int_cst (TREE_TYPE (reg), n_reg));
13480 u = fold_convert (sizetype, u);
13481 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13482 t = fold_build_pointer_plus (t, u);
13484 /* _Decimal32 varargs are located in the second word of the 64-bit
13485 FP register for 32-bit binaries. */
13486 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13487 t = fold_build_pointer_plus_hwi (t, size);
13489 /* Args are passed right-aligned. */
13490 if (BYTES_BIG_ENDIAN)
13491 t = fold_build_pointer_plus_hwi (t, pad);
13493 gimplify_assign (addr, t, pre_p);
13495 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13497 stmt = gimple_build_label (lab_false);
13498 gimple_seq_add_stmt (pre_p, stmt);
13500 if ((n_reg == 2 && !regalign) || n_reg > 2)
13502 /* Ensure that we don't find any more args in regs.
13503 Alignment has already taken care of the special cases. */
13504 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13508 /* ... otherwise out of the overflow area. */
13510 /* Care for on-stack alignment if needed. */
13511 t = ovf;
13512 if (align != 1)
13514 t = fold_build_pointer_plus_hwi (t, align - 1);
13515 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13516 build_int_cst (TREE_TYPE (t), -align));
13519 /* Args are passed right-aligned. */
13520 if (BYTES_BIG_ENDIAN)
13521 t = fold_build_pointer_plus_hwi (t, pad);
13523 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13525 gimplify_assign (unshare_expr (addr), t, pre_p);
13527 t = fold_build_pointer_plus_hwi (t, size);
13528 gimplify_assign (unshare_expr (ovf), t, pre_p);
13530 if (lab_over)
13532 stmt = gimple_build_label (lab_over);
13533 gimple_seq_add_stmt (pre_p, stmt);
13536 if (STRICT_ALIGNMENT
13537 && (TYPE_ALIGN (type)
13538 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13540 /* The value (of type complex double, for example) may not be
13541 aligned in memory in the saved registers, so copy via a
13542 temporary. (This is the same code as used for SPARC.) */
13543 tree tmp = create_tmp_var (type, "va_arg_tmp");
13544 tree dest_addr = build_fold_addr_expr (tmp);
13546 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13547 3, dest_addr, addr, size_int (rsize * 4));
13548 TREE_ADDRESSABLE (tmp) = 1;
13550 gimplify_and_add (copy, pre_p);
13551 addr = dest_addr;
13554 addr = fold_convert (ptrtype, addr);
13555 return build_va_arg_indirect_ref (addr);
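/* Illustrative sketch, not part of the original sources: the regalign
   handling above means a V.4 va_arg of a two-register item first rounds
   the gpr counter up to an even index, so the value is always fetched
   from an aligned pair such as r3/r4 or r5/r6.  */
#if 0
long long v = va_arg (ap, long long);   /* may skip one saved GPR */
#endif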
13558 /* Builtins. */
13560 static void
13561 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13563 tree t;
13564 unsigned classify = rs6000_builtin_info[(int)code].attr;
13565 const char *attr_string = "";
13567 gcc_assert (name != NULL);
13568 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13570 if (rs6000_builtin_decls[(int)code])
13571 fatal_error (input_location,
13572 "internal error: builtin function %qs already processed",
13573 name);
13575 rs6000_builtin_decls[(int)code] = t =
13576 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13578 /* Set any special attributes. */
13579 if ((classify & RS6000_BTC_CONST) != 0)
13581 /* const function: the result depends only on the inputs. */
13582 TREE_READONLY (t) = 1;
13583 TREE_NOTHROW (t) = 1;
13584 attr_string = ", const";
13586 else if ((classify & RS6000_BTC_PURE) != 0)
13588 /* pure function: it can read global memory, but does not set any
13589 external state. */
13590 DECL_PURE_P (t) = 1;
13591 TREE_NOTHROW (t) = 1;
13592 attr_string = ", pure";
13594 else if ((classify & RS6000_BTC_FP) != 0)
13596 /* Function is a math function. If rounding mode is on, then treat the
13597 function as not reading global memory, but it can have arbitrary side
13598 effects. If it is off, then assume the function is a const function.
13599 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13600 builtin-attrs.def that is used for the math functions. */
13601 TREE_NOTHROW (t) = 1;
13602 if (flag_rounding_math)
13604 DECL_PURE_P (t) = 1;
13605 DECL_IS_NOVOPS (t) = 1;
13606 attr_string = ", fp, pure";
13608 else
13610 TREE_READONLY (t) = 1;
13611 attr_string = ", fp, const";
13614 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13615 gcc_unreachable ();
13617 if (TARGET_DEBUG_BUILTIN)
13618 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13619 (int)code, name, attr_string);
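/* Illustrative sketch, not part of the original sources: classifying a
   builtin as const (TREE_READONLY) lets the middle end common repeated
   calls, e.g. the two calls below may be folded into one.  */
#if 0
vc = vec_add (va, vb);
vd = vec_add (va, vb);  /* no side effects, so vc's result can be reused */
#endif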
13622 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13624 #undef RS6000_BUILTIN_0
13625 #undef RS6000_BUILTIN_1
13626 #undef RS6000_BUILTIN_2
13627 #undef RS6000_BUILTIN_3
13628 #undef RS6000_BUILTIN_A
13629 #undef RS6000_BUILTIN_D
13630 #undef RS6000_BUILTIN_H
13631 #undef RS6000_BUILTIN_P
13632 #undef RS6000_BUILTIN_Q
13633 #undef RS6000_BUILTIN_X
13635 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13636 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13637 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13638 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13639 { MASK, ICODE, NAME, ENUM },
13641 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13642 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13643 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13644 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13645 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13646 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13648 static const struct builtin_description bdesc_3arg[] =
13650 #include "rs6000-builtin.def"
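/* Illustrative sketch, not part of the original sources: with the
   definitions above, a hypothetical .def line such as
     RS6000_BUILTIN_3 (ENUM_3ARG, "name", MASK, ATTR, CODE_FOR_insn)
   contributes one { MASK, CODE_FOR_insn, "name", ENUM_3ARG } entry to
   bdesc_3arg, while every other RS6000_BUILTIN_* line expands to
   nothing.  The same include is rescanned below with different macro
   definitions to build each of the other tables.  */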
13653 /* DST operations: void foo (void *, const int, const char). */
13655 #undef RS6000_BUILTIN_0
13656 #undef RS6000_BUILTIN_1
13657 #undef RS6000_BUILTIN_2
13658 #undef RS6000_BUILTIN_3
13659 #undef RS6000_BUILTIN_A
13660 #undef RS6000_BUILTIN_D
13661 #undef RS6000_BUILTIN_H
13662 #undef RS6000_BUILTIN_P
13663 #undef RS6000_BUILTIN_Q
13664 #undef RS6000_BUILTIN_X
13666 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13667 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13668 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13669 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13670 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13671 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13672 { MASK, ICODE, NAME, ENUM },
13674 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13675 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13676 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13677 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13679 static const struct builtin_description bdesc_dst[] =
13681 #include "rs6000-builtin.def"
13684 /* Simple binary operations: VECc = foo (VECa, VECb). */
13686 #undef RS6000_BUILTIN_0
13687 #undef RS6000_BUILTIN_1
13688 #undef RS6000_BUILTIN_2
13689 #undef RS6000_BUILTIN_3
13690 #undef RS6000_BUILTIN_A
13691 #undef RS6000_BUILTIN_D
13692 #undef RS6000_BUILTIN_H
13693 #undef RS6000_BUILTIN_P
13694 #undef RS6000_BUILTIN_Q
13695 #undef RS6000_BUILTIN_X
13697 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13698 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13699 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13700 { MASK, ICODE, NAME, ENUM },
13702 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13703 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13704 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13705 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13706 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13707 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13708 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13710 static const struct builtin_description bdesc_2arg[] =
13712 #include "rs6000-builtin.def"
13715 #undef RS6000_BUILTIN_0
13716 #undef RS6000_BUILTIN_1
13717 #undef RS6000_BUILTIN_2
13718 #undef RS6000_BUILTIN_3
13719 #undef RS6000_BUILTIN_A
13720 #undef RS6000_BUILTIN_D
13721 #undef RS6000_BUILTIN_H
13722 #undef RS6000_BUILTIN_P
13723 #undef RS6000_BUILTIN_Q
13724 #undef RS6000_BUILTIN_X
13726 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13727 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13728 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13729 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13730 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13731 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13732 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13733 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13734 { MASK, ICODE, NAME, ENUM },
13736 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13737 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13739 /* AltiVec predicates. */
13741 static const struct builtin_description bdesc_altivec_preds[] =
13743 #include "rs6000-builtin.def"
13746 /* PAIRED predicates. */
13747 #undef RS6000_BUILTIN_0
13748 #undef RS6000_BUILTIN_1
13749 #undef RS6000_BUILTIN_2
13750 #undef RS6000_BUILTIN_3
13751 #undef RS6000_BUILTIN_A
13752 #undef RS6000_BUILTIN_D
13753 #undef RS6000_BUILTIN_H
13754 #undef RS6000_BUILTIN_P
13755 #undef RS6000_BUILTIN_Q
13756 #undef RS6000_BUILTIN_X
13758 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13759 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13760 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13761 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13762 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13763 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13764 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13765 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13766 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13767 { MASK, ICODE, NAME, ENUM },
13769 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13771 static const struct builtin_description bdesc_paired_preds[] =
13773 #include "rs6000-builtin.def"
13776 /* ABS* operations. */
13778 #undef RS6000_BUILTIN_0
13779 #undef RS6000_BUILTIN_1
13780 #undef RS6000_BUILTIN_2
13781 #undef RS6000_BUILTIN_3
13782 #undef RS6000_BUILTIN_A
13783 #undef RS6000_BUILTIN_D
13784 #undef RS6000_BUILTIN_H
13785 #undef RS6000_BUILTIN_P
13786 #undef RS6000_BUILTIN_Q
13787 #undef RS6000_BUILTIN_X
13789 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13790 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13792 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13793 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13794 { MASK, ICODE, NAME, ENUM },
13796 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13797 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13798 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13799 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13800 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13802 static const struct builtin_description bdesc_abs[] =
13804 #include "rs6000-builtin.def"
13807 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13808 foo (VECa). */
13810 #undef RS6000_BUILTIN_0
13811 #undef RS6000_BUILTIN_1
13812 #undef RS6000_BUILTIN_2
13813 #undef RS6000_BUILTIN_3
13814 #undef RS6000_BUILTIN_A
13815 #undef RS6000_BUILTIN_D
13816 #undef RS6000_BUILTIN_H
13817 #undef RS6000_BUILTIN_P
13818 #undef RS6000_BUILTIN_Q
13819 #undef RS6000_BUILTIN_X
13821 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13822 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13823 { MASK, ICODE, NAME, ENUM },
13825 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13826 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13827 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13828 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13829 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13830 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13831 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13832 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13834 static const struct builtin_description bdesc_1arg[] =
13836 #include "rs6000-builtin.def"
13839 /* Simple no-argument operations: result = __builtin_darn_32 () */
13841 #undef RS6000_BUILTIN_0
13842 #undef RS6000_BUILTIN_1
13843 #undef RS6000_BUILTIN_2
13844 #undef RS6000_BUILTIN_3
13845 #undef RS6000_BUILTIN_A
13846 #undef RS6000_BUILTIN_D
13847 #undef RS6000_BUILTIN_H
13848 #undef RS6000_BUILTIN_P
13849 #undef RS6000_BUILTIN_Q
13850 #undef RS6000_BUILTIN_X
13852 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13853 { MASK, ICODE, NAME, ENUM },
13855 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13856 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13857 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13858 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13859 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13860 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13861 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13862 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13863 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13865 static const struct builtin_description bdesc_0arg[] =
13867 #include "rs6000-builtin.def"
13870 /* HTM builtins. */
13871 #undef RS6000_BUILTIN_0
13872 #undef RS6000_BUILTIN_1
13873 #undef RS6000_BUILTIN_2
13874 #undef RS6000_BUILTIN_3
13875 #undef RS6000_BUILTIN_A
13876 #undef RS6000_BUILTIN_D
13877 #undef RS6000_BUILTIN_H
13878 #undef RS6000_BUILTIN_P
13879 #undef RS6000_BUILTIN_Q
13880 #undef RS6000_BUILTIN_X
13882 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13883 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13884 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13885 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13886 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13887 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13888 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13889 { MASK, ICODE, NAME, ENUM },
13891 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13892 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13893 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13895 static const struct builtin_description bdesc_htm[] =
13897 #include "rs6000-builtin.def"
13900 #undef RS6000_BUILTIN_0
13901 #undef RS6000_BUILTIN_1
13902 #undef RS6000_BUILTIN_2
13903 #undef RS6000_BUILTIN_3
13904 #undef RS6000_BUILTIN_A
13905 #undef RS6000_BUILTIN_D
13906 #undef RS6000_BUILTIN_H
13907 #undef RS6000_BUILTIN_P
13908 #undef RS6000_BUILTIN_Q
13909 #undef RS6000_BUILTIN_X
13910 /* Return true if a builtin function is overloaded. */
13911 bool
13912 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13914 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13917 const char *
13918 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13920 return rs6000_builtin_info[(int)fncode].name;
13923 /* Expand an expression EXP that calls a builtin without arguments. */
13924 static rtx
13925 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13927 rtx pat;
13928 machine_mode tmode = insn_data[icode].operand[0].mode;
13930 if (icode == CODE_FOR_nothing)
13931 /* Builtin not supported on this processor. */
13932 return 0;
13934 if (target == 0
13935 || GET_MODE (target) != tmode
13936 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13937 target = gen_reg_rtx (tmode);
13939 pat = GEN_FCN (icode) (target);
13940 if (! pat)
13941 return 0;
13942 emit_insn (pat);
13944 return target;
13948 static rtx
13949 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13951 rtx pat;
13952 tree arg0 = CALL_EXPR_ARG (exp, 0);
13953 tree arg1 = CALL_EXPR_ARG (exp, 1);
13954 rtx op0 = expand_normal (arg0);
13955 rtx op1 = expand_normal (arg1);
13956 machine_mode mode0 = insn_data[icode].operand[0].mode;
13957 machine_mode mode1 = insn_data[icode].operand[1].mode;
13959 if (icode == CODE_FOR_nothing)
13960 /* Builtin not supported on this processor. */
13961 return 0;
13963 /* If we got invalid arguments bail out before generating bad rtl. */
13964 if (arg0 == error_mark_node || arg1 == error_mark_node)
13965 return const0_rtx;
13967 if (GET_CODE (op0) != CONST_INT
13968 || INTVAL (op0) > 255
13969 || INTVAL (op0) < 0)
13971 error ("argument 1 must be an 8-bit field value");
13972 return const0_rtx;
13975 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13976 op0 = copy_to_mode_reg (mode0, op0);
13978 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13979 op1 = copy_to_mode_reg (mode1, op1);
13981 pat = GEN_FCN (icode) (op0, op1);
13982 if (! pat)
13983 return const0_rtx;
13984 emit_insn (pat);
13986 return NULL_RTX;
13989 static rtx
13990 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13992 rtx pat;
13993 tree arg0 = CALL_EXPR_ARG (exp, 0);
13994 rtx op0 = expand_normal (arg0);
13995 machine_mode tmode = insn_data[icode].operand[0].mode;
13996 machine_mode mode0 = insn_data[icode].operand[1].mode;
13998 if (icode == CODE_FOR_nothing)
13999 /* Builtin not supported on this processor. */
14000 return 0;
14002 /* If we got invalid arguments bail out before generating bad rtl. */
14003 if (arg0 == error_mark_node)
14004 return const0_rtx;
14006 if (icode == CODE_FOR_altivec_vspltisb
14007 || icode == CODE_FOR_altivec_vspltish
14008 || icode == CODE_FOR_altivec_vspltisw)
14010 /* Only allow 5-bit *signed* literals. */
14011 if (GET_CODE (op0) != CONST_INT
14012 || INTVAL (op0) > 15
14013 || INTVAL (op0) < -16)
14015 error ("argument 1 must be a 5-bit signed literal");
14016 return CONST0_RTX (tmode);
14020 if (target == 0
14021 || GET_MODE (target) != tmode
14022 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14023 target = gen_reg_rtx (tmode);
14025 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14026 op0 = copy_to_mode_reg (mode0, op0);
14028 pat = GEN_FCN (icode) (target, op0);
14029 if (! pat)
14030 return 0;
14031 emit_insn (pat);
14033 return target;
14036 static rtx
14037 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14039 rtx pat, scratch1, scratch2;
14040 tree arg0 = CALL_EXPR_ARG (exp, 0);
14041 rtx op0 = expand_normal (arg0);
14042 machine_mode tmode = insn_data[icode].operand[0].mode;
14043 machine_mode mode0 = insn_data[icode].operand[1].mode;
14045 /* If we have invalid arguments, bail out before generating bad rtl. */
14046 if (arg0 == error_mark_node)
14047 return const0_rtx;
14049 if (target == 0
14050 || GET_MODE (target) != tmode
14051 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14052 target = gen_reg_rtx (tmode);
14054 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14055 op0 = copy_to_mode_reg (mode0, op0);
14057 scratch1 = gen_reg_rtx (mode0);
14058 scratch2 = gen_reg_rtx (mode0);
14060 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14061 if (! pat)
14062 return 0;
14063 emit_insn (pat);
14065 return target;
14068 static rtx
14069 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14071 rtx pat;
14072 tree arg0 = CALL_EXPR_ARG (exp, 0);
14073 tree arg1 = CALL_EXPR_ARG (exp, 1);
14074 rtx op0 = expand_normal (arg0);
14075 rtx op1 = expand_normal (arg1);
14076 machine_mode tmode = insn_data[icode].operand[0].mode;
14077 machine_mode mode0 = insn_data[icode].operand[1].mode;
14078 machine_mode mode1 = insn_data[icode].operand[2].mode;
14080 if (icode == CODE_FOR_nothing)
14081 /* Builtin not supported on this processor. */
14082 return 0;
14084 /* If we got invalid arguments bail out before generating bad rtl. */
14085 if (arg0 == error_mark_node || arg1 == error_mark_node)
14086 return const0_rtx;
14088 if (icode == CODE_FOR_altivec_vcfux
14089 || icode == CODE_FOR_altivec_vcfsx
14090 || icode == CODE_FOR_altivec_vctsxs
14091 || icode == CODE_FOR_altivec_vctuxs
14092 || icode == CODE_FOR_altivec_vspltb
14093 || icode == CODE_FOR_altivec_vsplth
14094 || icode == CODE_FOR_altivec_vspltw)
14096 /* Only allow 5-bit unsigned literals. */
14097 STRIP_NOPS (arg1);
14098 if (TREE_CODE (arg1) != INTEGER_CST
14099 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14101 error ("argument 2 must be a 5-bit unsigned literal");
14102 return CONST0_RTX (tmode);
14105 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14106 || icode == CODE_FOR_dfptstsfi_lt_dd
14107 || icode == CODE_FOR_dfptstsfi_gt_dd
14108 || icode == CODE_FOR_dfptstsfi_unordered_dd
14109 || icode == CODE_FOR_dfptstsfi_eq_td
14110 || icode == CODE_FOR_dfptstsfi_lt_td
14111 || icode == CODE_FOR_dfptstsfi_gt_td
14112 || icode == CODE_FOR_dfptstsfi_unordered_td)
14114 /* Only allow 6-bit unsigned literals. */
14115 STRIP_NOPS (arg0);
14116 if (TREE_CODE (arg0) != INTEGER_CST
14117 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14119 error ("argument 1 must be a 6-bit unsigned literal");
14120 return CONST0_RTX (tmode);
14123 else if (icode == CODE_FOR_xststdcqp_kf
14124 || icode == CODE_FOR_xststdcqp_tf
14125 || icode == CODE_FOR_xststdcdp
14126 || icode == CODE_FOR_xststdcsp
14127 || icode == CODE_FOR_xvtstdcdp
14128 || icode == CODE_FOR_xvtstdcsp)
14130 /* Only allow 7-bit unsigned literals. */
14131 STRIP_NOPS (arg1);
14132 if (TREE_CODE (arg1) != INTEGER_CST
14133 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14135 error ("argument 2 must be a 7-bit unsigned literal");
14136 return CONST0_RTX (tmode);
14139 else if (icode == CODE_FOR_unpackv1ti
14140 || icode == CODE_FOR_unpackkf
14141 || icode == CODE_FOR_unpacktf
14142 || icode == CODE_FOR_unpackif
14143 || icode == CODE_FOR_unpacktd)
14145 /* Only allow 1-bit unsigned literals. */
14146 STRIP_NOPS (arg1);
14147 if (TREE_CODE (arg1) != INTEGER_CST
14148 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14150 error ("argument 2 must be a 1-bit unsigned literal");
14151 return CONST0_RTX (tmode);
14155 if (target == 0
14156 || GET_MODE (target) != tmode
14157 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14158 target = gen_reg_rtx (tmode);
14160 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14161 op0 = copy_to_mode_reg (mode0, op0);
14162 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14163 op1 = copy_to_mode_reg (mode1, op1);
14165 pat = GEN_FCN (icode) (target, op0, op1);
14166 if (! pat)
14167 return 0;
14168 emit_insn (pat);
14170 return target;
14173 static rtx
14174 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14176 rtx pat, scratch;
14177 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14178 tree arg0 = CALL_EXPR_ARG (exp, 1);
14179 tree arg1 = CALL_EXPR_ARG (exp, 2);
14180 rtx op0 = expand_normal (arg0);
14181 rtx op1 = expand_normal (arg1);
14182 machine_mode tmode = SImode;
14183 machine_mode mode0 = insn_data[icode].operand[1].mode;
14184 machine_mode mode1 = insn_data[icode].operand[2].mode;
14185 int cr6_form_int;
14187 if (TREE_CODE (cr6_form) != INTEGER_CST)
14189 error ("argument 1 of %qs must be a constant",
14190 "__builtin_altivec_predicate");
14191 return const0_rtx;
14193 else
14194 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14196 gcc_assert (mode0 == mode1);
14198 /* If we have invalid arguments, bail out before generating bad rtl. */
14199 if (arg0 == error_mark_node || arg1 == error_mark_node)
14200 return const0_rtx;
14202 if (target == 0
14203 || GET_MODE (target) != tmode
14204 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14205 target = gen_reg_rtx (tmode);
14207 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14208 op0 = copy_to_mode_reg (mode0, op0);
14209 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14210 op1 = copy_to_mode_reg (mode1, op1);
14212 /* Note that for many of the relevant operations (e.g. cmpne or
14213 cmpeq) with float or double operands, it makes more sense for the
14214 mode of the allocated scratch register to be a vector of integers.
14215 But the choice to copy the mode of operand 0 was made
14216 long ago and there are no plans to change it. */
14217 scratch = gen_reg_rtx (mode0);
14219 pat = GEN_FCN (icode) (scratch, op0, op1);
14220 if (! pat)
14221 return 0;
14222 emit_insn (pat);
14224 /* The vec_any* and vec_all* predicates use the same opcodes for two
14225 different operations, but the bits in CR6 will be different
14226 depending on what information we want. So we have to play tricks
14227 with CR6 to get the right bits out.
14229 If you think this is disgusting, look at the specs for the
14230 AltiVec predicates. */
14232 switch (cr6_form_int)
14234 case 0:
14235 emit_insn (gen_cr6_test_for_zero (target));
14236 break;
14237 case 1:
14238 emit_insn (gen_cr6_test_for_zero_reverse (target));
14239 break;
14240 case 2:
14241 emit_insn (gen_cr6_test_for_lt (target));
14242 break;
14243 case 3:
14244 emit_insn (gen_cr6_test_for_lt_reverse (target));
14245 break;
14246 default:
14247 error ("argument 1 of %qs is out of range",
14248 "__builtin_altivec_predicate");
14249 break;
14252 return target;
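/* Illustrative sketch, not part of the original sources: the vec_all_*
   and vec_any_* intrinsics funnel through the expansion above; the
   compare instruction is the same, and only the CR6 test selected by
   the leading constant differs.  */
#if 0
if (vec_all_eq (va, vb))        /* tests one CR6 bit */
  f ();
if (vec_any_eq (va, vb))        /* same compare insn, a different CR6 test */
  g ();
#endif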
14255 static rtx
14256 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14258 rtx pat, addr;
14259 tree arg0 = CALL_EXPR_ARG (exp, 0);
14260 tree arg1 = CALL_EXPR_ARG (exp, 1);
14261 machine_mode tmode = insn_data[icode].operand[0].mode;
14262 machine_mode mode0 = Pmode;
14263 machine_mode mode1 = Pmode;
14264 rtx op0 = expand_normal (arg0);
14265 rtx op1 = expand_normal (arg1);
14267 if (icode == CODE_FOR_nothing)
14268 /* Builtin not supported on this processor. */
14269 return 0;
14271 /* If we got invalid arguments bail out before generating bad rtl. */
14272 if (arg0 == error_mark_node || arg1 == error_mark_node)
14273 return const0_rtx;
14275 if (target == 0
14276 || GET_MODE (target) != tmode
14277 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14278 target = gen_reg_rtx (tmode);
14280 op1 = copy_to_mode_reg (mode1, op1);
14282 if (op0 == const0_rtx)
14284 addr = gen_rtx_MEM (tmode, op1);
14286 else
14288 op0 = copy_to_mode_reg (mode0, op0);
14289 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14292 pat = GEN_FCN (icode) (target, addr);
14294 if (! pat)
14295 return 0;
14296 emit_insn (pat);
14298 return target;
14301 /* Return a constant vector for use as a little-endian permute control vector
14302 to reverse the order of elements of the given vector mode. */
14303 static rtx
14304 swap_selector_for_mode (machine_mode mode)
14306 /* These are little endian vectors, so their elements are reversed
14307 from what you would normally expect for a permute control vector. */
14308 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14309 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14310 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14311 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14312 unsigned int *swaparray, i;
14313 rtx perm[16];
14315 switch (mode)
14317 case E_V2DFmode:
14318 case E_V2DImode:
14319 swaparray = swap2;
14320 break;
14321 case E_V4SFmode:
14322 case E_V4SImode:
14323 swaparray = swap4;
14324 break;
14325 case E_V8HImode:
14326 swaparray = swap8;
14327 break;
14328 case E_V16QImode:
14329 swaparray = swap16;
14330 break;
14331 default:
14332 gcc_unreachable ();
14335 for (i = 0; i < 16; ++i)
14336 perm[i] = GEN_INT (swaparray[i]);
14338 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
14341 rtx
14342 swap_endian_selector_for_mode (machine_mode mode)
14344 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
14345 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14346 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14347 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14349 unsigned int *swaparray, i;
14350 rtx perm[16];
14352 switch (mode)
14354 case E_V1TImode:
14355 swaparray = swap1;
14356 break;
14357 case E_V2DFmode:
14358 case E_V2DImode:
14359 swaparray = swap2;
14360 break;
14361 case E_V4SFmode:
14362 case E_V4SImode:
14363 swaparray = swap4;
14364 break;
14365 case E_V8HImode:
14366 swaparray = swap8;
14367 break;
14368 default:
14369 gcc_unreachable ();
14372 for (i = 0; i < 16; ++i)
14373 perm[i] = GEN_INT (swaparray[i]);
14375 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
14376 gen_rtvec_v (16, perm)));
14379 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14380 with -maltivec=be specified. Issue the load followed by an element-
14381 reversing permute. */
14382 void
14383 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14385 rtx tmp = gen_reg_rtx (mode);
14386 rtx load = gen_rtx_SET (tmp, op1);
14387 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14388 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14389 rtx sel = swap_selector_for_mode (mode);
14390 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14392 gcc_assert (REG_P (op0));
14393 emit_insn (par);
14394 emit_insn (gen_rtx_SET (op0, vperm));
14397 /* Generate code for a "stvxl" built-in for a little endian target with
14398 -maltivec=be specified. Issue the store preceded by an element-reversing
14399 permute. */
14400 void
14401 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14403 rtx tmp = gen_reg_rtx (mode);
14404 rtx store = gen_rtx_SET (op0, tmp);
14405 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14406 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14407 rtx sel = swap_selector_for_mode (mode);
14408 rtx vperm;
14410 gcc_assert (REG_P (op1));
14411 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14412 emit_insn (gen_rtx_SET (tmp, vperm));
14413 emit_insn (par);
14416 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14417 specified. Issue the store preceded by an element-reversing permute. */
14418 void
14419 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14421 machine_mode inner_mode = GET_MODE_INNER (mode);
14422 rtx tmp = gen_reg_rtx (mode);
14423 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14424 rtx sel = swap_selector_for_mode (mode);
14425 rtx vperm;
14427 gcc_assert (REG_P (op1));
14428 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14429 emit_insn (gen_rtx_SET (tmp, vperm));
14430 emit_insn (gen_rtx_SET (op0, stvx));
14433 static rtx
14434 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14436 rtx pat, addr;
14437 tree arg0 = CALL_EXPR_ARG (exp, 0);
14438 tree arg1 = CALL_EXPR_ARG (exp, 1);
14439 machine_mode tmode = insn_data[icode].operand[0].mode;
14440 machine_mode mode0 = Pmode;
14441 machine_mode mode1 = Pmode;
14442 rtx op0 = expand_normal (arg0);
14443 rtx op1 = expand_normal (arg1);
14445 if (icode == CODE_FOR_nothing)
14446 /* Builtin not supported on this processor. */
14447 return 0;
14449 /* If we got invalid arguments bail out before generating bad rtl. */
14450 if (arg0 == error_mark_node || arg1 == error_mark_node)
14451 return const0_rtx;
14453 if (target == 0
14454 || GET_MODE (target) != tmode
14455 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14456 target = gen_reg_rtx (tmode);
14458 op1 = copy_to_mode_reg (mode1, op1);
14460 /* For LVX, express the RTL accurately by ANDing the address with -16.
14461 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14462 so the raw address is fine. */
14463 if (icode == CODE_FOR_altivec_lvx_v1ti
14464 || icode == CODE_FOR_altivec_lvx_v2df
14465 || icode == CODE_FOR_altivec_lvx_v2di
14466 || icode == CODE_FOR_altivec_lvx_v4sf
14467 || icode == CODE_FOR_altivec_lvx_v4si
14468 || icode == CODE_FOR_altivec_lvx_v8hi
14469 || icode == CODE_FOR_altivec_lvx_v16qi)
14471 rtx rawaddr;
14472 if (op0 == const0_rtx)
14473 rawaddr = op1;
14474 else
14476 op0 = copy_to_mode_reg (mode0, op0);
14477 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14479 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14480 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14482 /* For -maltivec=be, emit the load and follow it up with a
14483 permute to swap the elements. */
14484 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14486 rtx temp = gen_reg_rtx (tmode);
14487 emit_insn (gen_rtx_SET (temp, addr));
14489 rtx sel = swap_selector_for_mode (tmode);
14490 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14491 UNSPEC_VPERM);
14492 emit_insn (gen_rtx_SET (target, vperm));
14494 else
14495 emit_insn (gen_rtx_SET (target, addr));
14497 else
14499 if (op0 == const0_rtx)
14500 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14501 else
14503 op0 = copy_to_mode_reg (mode0, op0);
14504 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14505 gen_rtx_PLUS (Pmode, op1, op0));
14508 pat = GEN_FCN (icode) (target, addr);
14509 if (! pat)
14510 return 0;
14511 emit_insn (pat);
14514 return target;
14517 static rtx
14518 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14520 tree arg0 = CALL_EXPR_ARG (exp, 0);
14521 tree arg1 = CALL_EXPR_ARG (exp, 1);
14522 tree arg2 = CALL_EXPR_ARG (exp, 2);
14523 rtx op0 = expand_normal (arg0);
14524 rtx op1 = expand_normal (arg1);
14525 rtx op2 = expand_normal (arg2);
14526 rtx pat, addr;
14527 machine_mode tmode = insn_data[icode].operand[0].mode;
14528 machine_mode mode1 = Pmode;
14529 machine_mode mode2 = Pmode;
14531 /* Invalid arguments; bail out before generating bad RTL. */
14532 if (arg0 == error_mark_node
14533 || arg1 == error_mark_node
14534 || arg2 == error_mark_node)
14535 return const0_rtx;
14537 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14538 op0 = copy_to_mode_reg (tmode, op0);
14540 op2 = copy_to_mode_reg (mode2, op2);
14542 if (op1 == const0_rtx)
14544 addr = gen_rtx_MEM (tmode, op2);
14546 else
14548 op1 = copy_to_mode_reg (mode1, op1);
14549 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14552 pat = GEN_FCN (icode) (addr, op0);
14553 if (pat)
14554 emit_insn (pat);
14555 return NULL_RTX;
14558 static rtx
14559 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14561 rtx pat;
14562 tree arg0 = CALL_EXPR_ARG (exp, 0);
14563 tree arg1 = CALL_EXPR_ARG (exp, 1);
14564 tree arg2 = CALL_EXPR_ARG (exp, 2);
14565 rtx op0 = expand_normal (arg0);
14566 rtx op1 = expand_normal (arg1);
14567 rtx op2 = expand_normal (arg2);
14568 machine_mode mode0 = insn_data[icode].operand[0].mode;
14569 machine_mode mode1 = insn_data[icode].operand[1].mode;
14570 machine_mode mode2 = insn_data[icode].operand[2].mode;
14572 if (icode == CODE_FOR_nothing)
14573 /* Builtin not supported on this processor. */
14574 return NULL_RTX;
14576 /* If we got invalid arguments bail out before generating bad rtl. */
14577 if (arg0 == error_mark_node
14578 || arg1 == error_mark_node
14579 || arg2 == error_mark_node)
14580 return NULL_RTX;
14582 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14583 op0 = copy_to_mode_reg (mode0, op0);
14584 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14585 op1 = copy_to_mode_reg (mode1, op1);
14586 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14587 op2 = copy_to_mode_reg (mode2, op2);
14589 pat = GEN_FCN (icode) (op0, op1, op2);
14590 if (pat)
14591 emit_insn (pat);
14593 return NULL_RTX;
14596 static rtx
14597 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14599 tree arg0 = CALL_EXPR_ARG (exp, 0);
14600 tree arg1 = CALL_EXPR_ARG (exp, 1);
14601 tree arg2 = CALL_EXPR_ARG (exp, 2);
14602 rtx op0 = expand_normal (arg0);
14603 rtx op1 = expand_normal (arg1);
14604 rtx op2 = expand_normal (arg2);
14605 rtx pat, addr, rawaddr;
14606 machine_mode tmode = insn_data[icode].operand[0].mode;
14607 machine_mode smode = insn_data[icode].operand[1].mode;
14608 machine_mode mode1 = Pmode;
14609 machine_mode mode2 = Pmode;
14611 /* Invalid arguments; bail out before generating bad RTL. */
14612 if (arg0 == error_mark_node
14613 || arg1 == error_mark_node
14614 || arg2 == error_mark_node)
14615 return const0_rtx;
14617 op2 = copy_to_mode_reg (mode2, op2);
14619 /* For STVX, express the RTL accurately by ANDing the address with -16.
14620 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14621 so the raw address is fine. */
14622 if (icode == CODE_FOR_altivec_stvx_v2df
14623 || icode == CODE_FOR_altivec_stvx_v2di
14624 || icode == CODE_FOR_altivec_stvx_v4sf
14625 || icode == CODE_FOR_altivec_stvx_v4si
14626 || icode == CODE_FOR_altivec_stvx_v8hi
14627 || icode == CODE_FOR_altivec_stvx_v16qi)
14629 if (op1 == const0_rtx)
14630 rawaddr = op2;
14631 else
14633 op1 = copy_to_mode_reg (mode1, op1);
14634 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14637 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14638 addr = gen_rtx_MEM (tmode, addr);
14640 op0 = copy_to_mode_reg (tmode, op0);
14642 /* For -maltivec=be, emit a permute to swap the elements, followed
14643 by the store. */
14644 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14646 rtx temp = gen_reg_rtx (tmode);
14647 rtx sel = swap_selector_for_mode (tmode);
14648 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14649 UNSPEC_VPERM);
14650 emit_insn (gen_rtx_SET (temp, vperm));
14651 emit_insn (gen_rtx_SET (addr, temp));
14653 else
14654 emit_insn (gen_rtx_SET (addr, op0));
14656 else
14658 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14659 op0 = copy_to_mode_reg (smode, op0);
14661 if (op1 == const0_rtx)
14662 addr = gen_rtx_MEM (tmode, op2);
14663 else
14665 op1 = copy_to_mode_reg (mode1, op1);
14666 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14669 pat = GEN_FCN (icode) (addr, op0);
14670 if (pat)
14671 emit_insn (pat);
14674 return NULL_RTX;
14677 /* Return the appropriate SPR number associated with the given builtin. */
14678 static inline HOST_WIDE_INT
14679 htm_spr_num (enum rs6000_builtins code)
14681 if (code == HTM_BUILTIN_GET_TFHAR
14682 || code == HTM_BUILTIN_SET_TFHAR)
14683 return TFHAR_SPR;
14684 else if (code == HTM_BUILTIN_GET_TFIAR
14685 || code == HTM_BUILTIN_SET_TFIAR)
14686 return TFIAR_SPR;
14687 else if (code == HTM_BUILTIN_GET_TEXASR
14688 || code == HTM_BUILTIN_SET_TEXASR)
14689 return TEXASR_SPR;
14690 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14691 || code == HTM_BUILTIN_SET_TEXASRU);
14692 return TEXASRU_SPR;
14695 /* Return the appropriate SPR regno associated with the given builtin. */
14696 static inline HOST_WIDE_INT
14697 htm_spr_regno (enum rs6000_builtins code)
14699 if (code == HTM_BUILTIN_GET_TFHAR
14700 || code == HTM_BUILTIN_SET_TFHAR)
14701 return TFHAR_REGNO;
14702 else if (code == HTM_BUILTIN_GET_TFIAR
14703 || code == HTM_BUILTIN_SET_TFIAR)
14704 return TFIAR_REGNO;
14705 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14706 || code == HTM_BUILTIN_SET_TEXASR
14707 || code == HTM_BUILTIN_GET_TEXASRU
14708 || code == HTM_BUILTIN_SET_TEXASRU);
14709 return TEXASR_REGNO;
14712 /* Return the correct ICODE value depending on whether we are
14713 setting or reading the HTM SPRs. */
14714 static inline enum insn_code
14715 rs6000_htm_spr_icode (bool nonvoid)
14717 if (nonvoid)
14718 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14719 else
14720 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14723 /* Expand the HTM builtin in EXP and store the result in TARGET.
14724 Store true in *EXPANDEDP if we found a builtin to expand. */
14725 static rtx
14726 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14728 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14729 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14730 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14731 const struct builtin_description *d;
14732 size_t i;
14734 *expandedp = true;
14736 if (!TARGET_POWERPC64
14737 && (fcode == HTM_BUILTIN_TABORTDC
14738 || fcode == HTM_BUILTIN_TABORTDCI))
14740 size_t uns_fcode = (size_t)fcode;
14741 const char *name = rs6000_builtin_info[uns_fcode].name;
14742 error ("builtin %qs is only valid in 64-bit mode", name);
14743 return const0_rtx;
14746 /* Expand the HTM builtins. */
14747 d = bdesc_htm;
14748 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14749 if (d->code == fcode)
14751 rtx op[MAX_HTM_OPERANDS], pat;
14752 int nopnds = 0;
14753 tree arg;
14754 call_expr_arg_iterator iter;
14755 unsigned attr = rs6000_builtin_info[fcode].attr;
14756 enum insn_code icode = d->icode;
14757 const struct insn_operand_data *insn_op;
14758 bool uses_spr = (attr & RS6000_BTC_SPR);
14759 rtx cr = NULL_RTX;
14761 if (uses_spr)
14762 icode = rs6000_htm_spr_icode (nonvoid);
14763 insn_op = &insn_data[icode].operand[0];
14765 if (nonvoid)
14767 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14768 if (!target
14769 || GET_MODE (target) != tmode
14770 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14771 target = gen_reg_rtx (tmode);
14772 if (uses_spr)
14773 op[nopnds++] = target;
14776 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14778 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14779 return const0_rtx;
14781 insn_op = &insn_data[icode].operand[nopnds];
14783 op[nopnds] = expand_normal (arg);
14785 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14787 if (!strcmp (insn_op->constraint, "n"))
14789 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14790 if (!CONST_INT_P (op[nopnds]))
14791 error ("argument %d must be an unsigned literal", arg_num);
14792 else
14793 error ("argument %d is an unsigned literal that is "
14794 "out of range", arg_num);
14795 return const0_rtx;
14797 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14800 nopnds++;
14803 /* Handle the builtins for extended mnemonics. These accept
14804 no arguments, but map to builtins that take arguments. */
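/* For instance (an illustrative reading of the cases below),
   __builtin_tendall () expands exactly like __builtin_tend (1), and
   __builtin_tsuspend () exactly like __builtin_tsr (0).  */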
14805 switch (fcode)
14807 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14808 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14809 op[nopnds++] = GEN_INT (1);
14810 if (flag_checking)
14811 attr |= RS6000_BTC_UNARY;
14812 break;
14813 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14814 op[nopnds++] = GEN_INT (0);
14815 if (flag_checking)
14816 attr |= RS6000_BTC_UNARY;
14817 break;
14818 default:
14819 break;
14822 /* If this builtin accesses SPRs, then pass in the appropriate
14823 SPR number and SPR regno as the last two operands. */
14824 if (uses_spr)
14826 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14827 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14828 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14830 /* If this builtin accesses a CR, then pass in a scratch
14831 CR as the last operand. */
14832 else if (attr & RS6000_BTC_CR)
14833 { cr = gen_reg_rtx (CCmode);
14834 op[nopnds++] = cr;
14837 if (flag_checking)
14839 int expected_nopnds = 0;
14840 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14841 expected_nopnds = 1;
14842 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14843 expected_nopnds = 2;
14844 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14845 expected_nopnds = 3;
14846 if (!(attr & RS6000_BTC_VOID))
14847 expected_nopnds += 1;
14848 if (uses_spr)
14849 expected_nopnds += 2;
14851 gcc_assert (nopnds == expected_nopnds
14852 && nopnds <= MAX_HTM_OPERANDS);
14855 switch (nopnds)
14857 case 1:
14858 pat = GEN_FCN (icode) (op[0]);
14859 break;
14860 case 2:
14861 pat = GEN_FCN (icode) (op[0], op[1]);
14862 break;
14863 case 3:
14864 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14865 break;
14866 case 4:
14867 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14868 break;
14869 default:
14870 gcc_unreachable ();
14872 if (!pat)
14873 return NULL_RTX;
14874 emit_insn (pat);
14876 if (attr & RS6000_BTC_CR)
14878 if (fcode == HTM_BUILTIN_TBEGIN)
14880 /* Emit code to set TARGET to true or false depending on
14881 whether the tbegin. instruction succeeded or failed to start a
14882 transaction. We do this by placing the 1's
14883 complement of CR's EQ bit into TARGET. */
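/* A hedged user-level sketch of what that gives the programmer:

     if (__builtin_tbegin (0))
       {
         ... transactional code ...
         __builtin_tend (0);
       }
     else
       ... fallback path ...

   The XOR below is what makes __builtin_tbegin return nonzero
   exactly when the transaction actually started.  */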
14884 rtx scratch = gen_reg_rtx (SImode);
14885 emit_insn (gen_rtx_SET (scratch,
14886 gen_rtx_EQ (SImode, cr,
14887 const0_rtx)));
14888 emit_insn (gen_rtx_SET (target,
14889 gen_rtx_XOR (SImode, scratch,
14890 GEN_INT (1))));
14892 else
14894 /* Emit code to copy the 4-bit condition register field
14895 CR into the least significant end of register TARGET. */
14896 rtx scratch1 = gen_reg_rtx (SImode);
14897 rtx scratch2 = gen_reg_rtx (SImode);
14898 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14899 emit_insn (gen_movcc (subreg, cr));
14900 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14901 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14905 if (nonvoid)
14906 return target;
14907 return const0_rtx;
14910 *expandedp = false;
14911 return NULL_RTX;
14914 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14916 static rtx
14917 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14918 rtx target)
14920 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14921 if (fcode == RS6000_BUILTIN_CPU_INIT)
14922 return const0_rtx;
14924 if (target == 0 || GET_MODE (target) != SImode)
14925 target = gen_reg_rtx (SImode);
14927 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14928 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14929 /* The target_clones attribute creates an ARRAY_REF instead of a STRING_CST;
14930 convert it back to a STRING_CST. */
14931 if (TREE_CODE (arg) == ARRAY_REF
14932 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14933 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14934 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14935 arg = TREE_OPERAND (arg, 0);
14937 if (TREE_CODE (arg) != STRING_CST)
14939 error ("builtin %qs only accepts a string argument",
14940 rs6000_builtin_info[(size_t) fcode].name);
14941 return const0_rtx;
14944 if (fcode == RS6000_BUILTIN_CPU_IS)
14946 const char *cpu = TREE_STRING_POINTER (arg);
14947 rtx cpuid = NULL_RTX;
14948 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14949 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14951 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14952 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14953 break;
14955 if (cpuid == NULL_RTX)
14957 /* Invalid CPU argument. */
14958 error ("cpu %qs is an invalid argument to builtin %qs",
14959 cpu, rs6000_builtin_info[(size_t) fcode].name);
14960 return const0_rtx;
14963 rtx platform = gen_reg_rtx (SImode);
14964 rtx tcbmem = gen_const_mem (SImode,
14965 gen_rtx_PLUS (Pmode,
14966 gen_rtx_REG (Pmode, TLS_REGNUM),
14967 GEN_INT (TCB_PLATFORM_OFFSET)));
14968 emit_move_insn (platform, tcbmem);
14969 emit_insn (gen_eqsi3 (target, platform, cpuid));
14971 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14973 const char *hwcap = TREE_STRING_POINTER (arg);
14974 rtx mask = NULL_RTX;
14975 int hwcap_offset;
14976 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14977 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14979 mask = GEN_INT (cpu_supports_info[i].mask);
14980 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14981 break;
14983 if (mask == NULL_RTX)
14985 /* Invalid HWCAP argument. */
14986 error ("%s %qs is an invalid argument to builtin %qs",
14987 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14988 return const0_rtx;
14991 rtx tcb_hwcap = gen_reg_rtx (SImode);
14992 rtx tcbmem = gen_const_mem (SImode,
14993 gen_rtx_PLUS (Pmode,
14994 gen_rtx_REG (Pmode, TLS_REGNUM),
14995 GEN_INT (hwcap_offset)));
14996 emit_move_insn (tcb_hwcap, tcbmem);
14997 rtx scratch1 = gen_reg_rtx (SImode);
14998 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14999 rtx scratch2 = gen_reg_rtx (SImode);
15000 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
15001 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
15003 else
15004 gcc_unreachable ();
15006 /* Record that we have expanded a CPU builtin, so that we can later
15007 emit a reference to the special symbol exported by LIBC to ensure we
15008 do not link against an old LIBC that doesn't support this feature. */
15009 cpu_builtin_p = true;
15011 #else
15012 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
15013 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
15015 /* For old LIBCs, always return FALSE. */
15016 emit_move_insn (target, GEN_INT (0));
15017 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15019 return target;
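/* Typical uses of the two builtins expanded above (a sketch; the
   run_power9_path/run_vsx_path callees are hypothetical):

     if (__builtin_cpu_is ("power9"))
       run_power9_path ();
     if (__builtin_cpu_supports ("vsx"))
       run_vsx_path ();

   Each test compiles to a single load from the thread control block
   followed by a compare (cpu_is) or a mask-and-test (cpu_supports),
   provided glibc 2.23 or newer populates the TCB fields.  */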
15022 static rtx
15023 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15025 rtx pat;
15026 tree arg0 = CALL_EXPR_ARG (exp, 0);
15027 tree arg1 = CALL_EXPR_ARG (exp, 1);
15028 tree arg2 = CALL_EXPR_ARG (exp, 2);
15029 rtx op0 = expand_normal (arg0);
15030 rtx op1 = expand_normal (arg1);
15031 rtx op2 = expand_normal (arg2);
15032 machine_mode tmode = insn_data[icode].operand[0].mode;
15033 machine_mode mode0 = insn_data[icode].operand[1].mode;
15034 machine_mode mode1 = insn_data[icode].operand[2].mode;
15035 machine_mode mode2 = insn_data[icode].operand[3].mode;
15037 if (icode == CODE_FOR_nothing)
15038 /* Builtin not supported on this processor. */
15039 return 0;
15041 /* If we got invalid arguments bail out before generating bad rtl. */
15042 if (arg0 == error_mark_node
15043 || arg1 == error_mark_node
15044 || arg2 == error_mark_node)
15045 return const0_rtx;
15047 /* Check and prepare argument depending on the instruction code.
15049 Note that a switch statement instead of the sequence of tests
15050 would be incorrect as many of the CODE_FOR values could be
15051 CODE_FOR_nothing and that would yield multiple alternatives
15052 with identical values. We'd never reach here at runtime in
15053 this case. */
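/* (Concretely: if two of the icodes tested below were both
   CODE_FOR_nothing on some subtarget, the corresponding switch would
   contain duplicate case labels and fail to compile.)  */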
15054 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15055 || icode == CODE_FOR_altivec_vsldoi_v2df
15056 || icode == CODE_FOR_altivec_vsldoi_v4si
15057 || icode == CODE_FOR_altivec_vsldoi_v8hi
15058 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15060 /* Only allow 4-bit unsigned literals. */
15061 STRIP_NOPS (arg2);
15062 if (TREE_CODE (arg2) != INTEGER_CST
15063 || TREE_INT_CST_LOW (arg2) & ~0xf)
15065 error ("argument 3 must be a 4-bit unsigned literal");
15066 return CONST0_RTX (tmode);
15069 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15070 || icode == CODE_FOR_vsx_xxpermdi_v2di
15071 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15072 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15073 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15074 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15075 || icode == CODE_FOR_vsx_xxpermdi_v4si
15076 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15077 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15078 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15079 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15080 || icode == CODE_FOR_vsx_xxsldwi_v4si
15081 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15082 || icode == CODE_FOR_vsx_xxsldwi_v2di
15083 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15085 /* Only allow 2-bit unsigned literals. */
15086 STRIP_NOPS (arg2);
15087 if (TREE_CODE (arg2) != INTEGER_CST
15088 || TREE_INT_CST_LOW (arg2) & ~0x3)
15090 error ("argument 3 must be a 2-bit unsigned literal");
15091 return CONST0_RTX (tmode);
15094 else if (icode == CODE_FOR_vsx_set_v2df
15095 || icode == CODE_FOR_vsx_set_v2di
15096 || icode == CODE_FOR_bcdadd
15097 || icode == CODE_FOR_bcdadd_lt
15098 || icode == CODE_FOR_bcdadd_eq
15099 || icode == CODE_FOR_bcdadd_gt
15100 || icode == CODE_FOR_bcdsub
15101 || icode == CODE_FOR_bcdsub_lt
15102 || icode == CODE_FOR_bcdsub_eq
15103 || icode == CODE_FOR_bcdsub_gt)
15105 /* Only allow 1-bit unsigned literals. */
15106 STRIP_NOPS (arg2);
15107 if (TREE_CODE (arg2) != INTEGER_CST
15108 || TREE_INT_CST_LOW (arg2) & ~0x1)
15110 error ("argument 3 must be a 1-bit unsigned literal");
15111 return CONST0_RTX (tmode);
15114 else if (icode == CODE_FOR_dfp_ddedpd_dd
15115 || icode == CODE_FOR_dfp_ddedpd_td)
15117 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15118 STRIP_NOPS (arg0);
15119 if (TREE_CODE (arg0) != INTEGER_CST
15120 || TREE_INT_CST_LOW (arg0) & ~0x3)
15122 error ("argument 1 must be 0 or 2");
15123 return CONST0_RTX (tmode);
15126 else if (icode == CODE_FOR_dfp_denbcd_dd
15127 || icode == CODE_FOR_dfp_denbcd_td)
15129 /* Only allow 1-bit unsigned literals. */
15130 STRIP_NOPS (arg0);
15131 if (TREE_CODE (arg0) != INTEGER_CST
15132 || TREE_INT_CST_LOW (arg0) & ~0x1)
15134 error ("argument 1 must be a 1-bit unsigned literal");
15135 return CONST0_RTX (tmode);
15138 else if (icode == CODE_FOR_dfp_dscli_dd
15139 || icode == CODE_FOR_dfp_dscli_td
15140 || icode == CODE_FOR_dfp_dscri_dd
15141 || icode == CODE_FOR_dfp_dscri_td)
15143 /* Only allow 6-bit unsigned literals. */
15144 STRIP_NOPS (arg1);
15145 if (TREE_CODE (arg1) != INTEGER_CST
15146 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15148 error ("argument 2 must be a 6-bit unsigned literal");
15149 return CONST0_RTX (tmode);
15152 else if (icode == CODE_FOR_crypto_vshasigmaw
15153 || icode == CODE_FOR_crypto_vshasigmad)
15155 /* Check whether the 2nd and 3rd arguments are integer constants in
15156 range, and prepare the arguments. */
15157 STRIP_NOPS (arg1);
15158 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
15160 error ("argument 2 must be 0 or 1");
15161 return CONST0_RTX (tmode);
15164 STRIP_NOPS (arg2);
15165 if (TREE_CODE (arg2) != INTEGER_CST
15166 || wi::geu_p (wi::to_wide (arg2), 16))
15168 error ("argument 3 must be in the range 0..15");
15169 return CONST0_RTX (tmode);
15173 if (target == 0
15174 || GET_MODE (target) != tmode
15175 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15176 target = gen_reg_rtx (tmode);
15178 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15179 op0 = copy_to_mode_reg (mode0, op0);
15180 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15181 op1 = copy_to_mode_reg (mode1, op1);
15182 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15183 op2 = copy_to_mode_reg (mode2, op2);
15185 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15186 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15187 else
15188 pat = GEN_FCN (icode) (target, op0, op1, op2);
15189 if (! pat)
15190 return 0;
15191 emit_insn (pat);
15193 return target;
15197 /* Expand the dst builtins. */
15198 static rtx
15199 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15200 bool *expandedp)
15202 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15203 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15204 tree arg0, arg1, arg2;
15205 machine_mode mode0, mode1;
15206 rtx pat, op0, op1, op2;
15207 const struct builtin_description *d;
15208 size_t i;
15210 *expandedp = false;
15212 /* Handle DST variants. */
15213 d = bdesc_dst;
15214 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15215 if (d->code == fcode)
15217 arg0 = CALL_EXPR_ARG (exp, 0);
15218 arg1 = CALL_EXPR_ARG (exp, 1);
15219 arg2 = CALL_EXPR_ARG (exp, 2);
15220 op0 = expand_normal (arg0);
15221 op1 = expand_normal (arg1);
15222 op2 = expand_normal (arg2);
15223 mode0 = insn_data[d->icode].operand[0].mode;
15224 mode1 = insn_data[d->icode].operand[1].mode;
15226 /* Invalid arguments, bail out before generating bad rtl. */
15227 if (arg0 == error_mark_node
15228 || arg1 == error_mark_node
15229 || arg2 == error_mark_node)
15230 return const0_rtx;
15232 *expandedp = true;
15233 STRIP_NOPS (arg2);
15234 if (TREE_CODE (arg2) != INTEGER_CST
15235 || TREE_INT_CST_LOW (arg2) & ~0x3)
15237 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15238 return const0_rtx;
15241 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15242 op0 = copy_to_mode_reg (Pmode, op0);
15243 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15244 op1 = copy_to_mode_reg (mode1, op1);
15246 pat = GEN_FCN (d->icode) (op0, op1, op2);
15247 if (pat != 0)
15248 emit_insn (pat);
15250 return NULL_RTX;
15253 return NULL_RTX;
15256 /* Expand vec_init builtin. */
15257 static rtx
15258 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15260 machine_mode tmode = TYPE_MODE (type);
15261 machine_mode inner_mode = GET_MODE_INNER (tmode);
15262 int i, n_elt = GET_MODE_NUNITS (tmode);
15264 gcc_assert (VECTOR_MODE_P (tmode));
15265 gcc_assert (n_elt == call_expr_nargs (exp));
15267 if (!target || !register_operand (target, tmode))
15268 target = gen_reg_rtx (tmode);
15270 /* If we have a vector composed of a single element, such as V1TImode, do
15271 the initialization directly. */
15272 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15274 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15275 emit_move_insn (target, gen_lowpart (tmode, x));
15277 else
15279 rtvec v = rtvec_alloc (n_elt);
15281 for (i = 0; i < n_elt; ++i)
15283 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15284 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15287 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15290 return target;
15293 /* Return the integer constant in ARG. Constrain it to be in the range
15294 of the subparts of VEC_TYPE; issue an error if not. */
15296 static int
15297 get_element_number (tree vec_type, tree arg)
15299 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15301 if (!tree_fits_uhwi_p (arg)
15302 || (elt = tree_to_uhwi (arg), elt > max))
15304 error ("selector must be an integer constant in the range 0..%wi", max);
15305 return 0;
15308 return elt;
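/* E.g. for a V8HI vector type TYPE_VECTOR_SUBPARTS is 8, so only
   selectors 0..7 are accepted; a constant selector of 9 would be
   diagnosed with the error above (illustrative).  */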
15311 /* Expand vec_set builtin. */
15312 static rtx
15313 altivec_expand_vec_set_builtin (tree exp)
15315 machine_mode tmode, mode1;
15316 tree arg0, arg1, arg2;
15317 int elt;
15318 rtx op0, op1;
15320 arg0 = CALL_EXPR_ARG (exp, 0);
15321 arg1 = CALL_EXPR_ARG (exp, 1);
15322 arg2 = CALL_EXPR_ARG (exp, 2);
15324 tmode = TYPE_MODE (TREE_TYPE (arg0));
15325 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15326 gcc_assert (VECTOR_MODE_P (tmode));
15328 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15329 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15330 elt = get_element_number (TREE_TYPE (arg0), arg2);
15332 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15333 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15335 op0 = force_reg (tmode, op0);
15336 op1 = force_reg (mode1, op1);
15338 rs6000_expand_vector_set (op0, op1, elt);
15340 return op0;
15343 /* Expand vec_ext builtin. */
15344 static rtx
15345 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15347 machine_mode tmode, mode0;
15348 tree arg0, arg1;
15349 rtx op0;
15350 rtx op1;
15352 arg0 = CALL_EXPR_ARG (exp, 0);
15353 arg1 = CALL_EXPR_ARG (exp, 1);
15355 op0 = expand_normal (arg0);
15356 op1 = expand_normal (arg1);
15358 /* Call get_element_number to validate arg1 if it is a constant. */
15359 if (TREE_CODE (arg1) == INTEGER_CST)
15360 (void) get_element_number (TREE_TYPE (arg0), arg1);
15362 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15363 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15364 gcc_assert (VECTOR_MODE_P (mode0));
15366 op0 = force_reg (mode0, op0);
15368 if (optimize || !target || !register_operand (target, tmode))
15369 target = gen_reg_rtx (tmode);
15371 rs6000_expand_vector_extract (target, op0, op1);
15373 return target;
15376 /* Expand the builtin in EXP and store the result in TARGET. Store
15377 true in *EXPANDEDP if we found a builtin to expand. */
15378 static rtx
15379 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15381 const struct builtin_description *d;
15382 size_t i;
15383 enum insn_code icode;
15384 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15385 tree arg0, arg1, arg2;
15386 rtx op0, pat;
15387 machine_mode tmode, mode0;
15388 enum rs6000_builtins fcode
15389 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15391 if (rs6000_overloaded_builtin_p (fcode))
15393 *expandedp = true;
15394 error ("unresolved overload for Altivec builtin %qF", fndecl);
15396 /* Given it is invalid, just generate a normal call. */
15397 return expand_call (exp, target, false);
15400 target = altivec_expand_dst_builtin (exp, target, expandedp);
15401 if (*expandedp)
15402 return target;
15404 *expandedp = true;
15406 switch (fcode)
15408 case ALTIVEC_BUILTIN_STVX_V2DF:
15409 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
15410 case ALTIVEC_BUILTIN_STVX_V2DI:
15411 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
15412 case ALTIVEC_BUILTIN_STVX_V4SF:
15413 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
15414 case ALTIVEC_BUILTIN_STVX:
15415 case ALTIVEC_BUILTIN_STVX_V4SI:
15416 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
15417 case ALTIVEC_BUILTIN_STVX_V8HI:
15418 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
15419 case ALTIVEC_BUILTIN_STVX_V16QI:
15420 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
15421 case ALTIVEC_BUILTIN_STVEBX:
15422 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15423 case ALTIVEC_BUILTIN_STVEHX:
15424 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15425 case ALTIVEC_BUILTIN_STVEWX:
15426 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15427 case ALTIVEC_BUILTIN_STVXL_V2DF:
15428 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15429 case ALTIVEC_BUILTIN_STVXL_V2DI:
15430 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15431 case ALTIVEC_BUILTIN_STVXL_V4SF:
15432 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15433 case ALTIVEC_BUILTIN_STVXL:
15434 case ALTIVEC_BUILTIN_STVXL_V4SI:
15435 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15436 case ALTIVEC_BUILTIN_STVXL_V8HI:
15437 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15438 case ALTIVEC_BUILTIN_STVXL_V16QI:
15439 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15441 case ALTIVEC_BUILTIN_STVLX:
15442 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15443 case ALTIVEC_BUILTIN_STVLXL:
15444 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15445 case ALTIVEC_BUILTIN_STVRX:
15446 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15447 case ALTIVEC_BUILTIN_STVRXL:
15448 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15450 case P9V_BUILTIN_STXVL:
15451 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15453 case P9V_BUILTIN_XST_LEN_R:
15454 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
15456 case VSX_BUILTIN_STXVD2X_V1TI:
15457 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15458 case VSX_BUILTIN_STXVD2X_V2DF:
15459 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15460 case VSX_BUILTIN_STXVD2X_V2DI:
15461 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15462 case VSX_BUILTIN_STXVW4X_V4SF:
15463 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15464 case VSX_BUILTIN_STXVW4X_V4SI:
15465 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15466 case VSX_BUILTIN_STXVW4X_V8HI:
15467 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15468 case VSX_BUILTIN_STXVW4X_V16QI:
15469 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15471 /* For the following on big endian, it's ok to use any appropriate
15472 unaligned-supporting store, so use a generic expander. For
15473 little-endian, the exact element-reversing instruction must
15474 be used. */
15475 case VSX_BUILTIN_ST_ELEMREV_V1TI:
15477 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
15478 : CODE_FOR_vsx_st_elemrev_v1ti);
15479 return altivec_expand_stv_builtin (code, exp);
15481 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15483 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15484 : CODE_FOR_vsx_st_elemrev_v2df);
15485 return altivec_expand_stv_builtin (code, exp);
15487 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15489 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15490 : CODE_FOR_vsx_st_elemrev_v2di);
15491 return altivec_expand_stv_builtin (code, exp);
15493 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15495 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15496 : CODE_FOR_vsx_st_elemrev_v4sf);
15497 return altivec_expand_stv_builtin (code, exp);
15499 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15501 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15502 : CODE_FOR_vsx_st_elemrev_v4si);
15503 return altivec_expand_stv_builtin (code, exp);
15505 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15507 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15508 : CODE_FOR_vsx_st_elemrev_v8hi);
15509 return altivec_expand_stv_builtin (code, exp);
15511 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15513 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15514 : CODE_FOR_vsx_st_elemrev_v16qi);
15515 return altivec_expand_stv_builtin (code, exp);
15518 case ALTIVEC_BUILTIN_MFVSCR:
15519 icode = CODE_FOR_altivec_mfvscr;
15520 tmode = insn_data[icode].operand[0].mode;
15522 if (target == 0
15523 || GET_MODE (target) != tmode
15524 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15525 target = gen_reg_rtx (tmode);
15527 pat = GEN_FCN (icode) (target);
15528 if (! pat)
15529 return 0;
15530 emit_insn (pat);
15531 return target;
15533 case ALTIVEC_BUILTIN_MTVSCR:
15534 icode = CODE_FOR_altivec_mtvscr;
15535 arg0 = CALL_EXPR_ARG (exp, 0);
15536 op0 = expand_normal (arg0);
15537 mode0 = insn_data[icode].operand[0].mode;
15539 /* If we got invalid arguments bail out before generating bad rtl. */
15540 if (arg0 == error_mark_node)
15541 return const0_rtx;
15543 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15544 op0 = copy_to_mode_reg (mode0, op0);
15546 pat = GEN_FCN (icode) (op0);
15547 if (pat)
15548 emit_insn (pat);
15549 return NULL_RTX;
15551 case ALTIVEC_BUILTIN_DSSALL:
15552 emit_insn (gen_altivec_dssall ());
15553 return NULL_RTX;
15555 case ALTIVEC_BUILTIN_DSS:
15556 icode = CODE_FOR_altivec_dss;
15557 arg0 = CALL_EXPR_ARG (exp, 0);
15558 STRIP_NOPS (arg0);
15559 op0 = expand_normal (arg0);
15560 mode0 = insn_data[icode].operand[0].mode;
15562 /* If we got invalid arguments bail out before generating bad rtl. */
15563 if (arg0 == error_mark_node)
15564 return const0_rtx;
15566 if (TREE_CODE (arg0) != INTEGER_CST
15567 || TREE_INT_CST_LOW (arg0) & ~0x3)
15569 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15570 return const0_rtx;
15573 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15574 op0 = copy_to_mode_reg (mode0, op0);
15576 emit_insn (gen_altivec_dss (op0));
15577 return NULL_RTX;
15579 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15580 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15581 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15582 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15583 case VSX_BUILTIN_VEC_INIT_V2DF:
15584 case VSX_BUILTIN_VEC_INIT_V2DI:
15585 case VSX_BUILTIN_VEC_INIT_V1TI:
15586 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15588 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15589 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15590 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15591 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15592 case VSX_BUILTIN_VEC_SET_V2DF:
15593 case VSX_BUILTIN_VEC_SET_V2DI:
15594 case VSX_BUILTIN_VEC_SET_V1TI:
15595 return altivec_expand_vec_set_builtin (exp);
15597 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15598 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15599 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15600 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15601 case VSX_BUILTIN_VEC_EXT_V2DF:
15602 case VSX_BUILTIN_VEC_EXT_V2DI:
15603 case VSX_BUILTIN_VEC_EXT_V1TI:
15604 return altivec_expand_vec_ext_builtin (exp, target);
15606 case P9V_BUILTIN_VEC_EXTRACT4B:
15607 arg1 = CALL_EXPR_ARG (exp, 1);
15608 STRIP_NOPS (arg1);
15610 /* Generate a normal call if it is invalid. */
15611 if (arg1 == error_mark_node)
15612 return expand_call (exp, target, false);
15614 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15616 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15617 return expand_call (exp, target, false);
15619 break;
15621 case P9V_BUILTIN_VEC_INSERT4B:
15622 arg2 = CALL_EXPR_ARG (exp, 2);
15623 STRIP_NOPS (arg2);
15625 /* Generate a normal call if it is invalid. */
15626 if (arg2 == error_mark_node)
15627 return expand_call (exp, target, false);
15629 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15631 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15632 return expand_call (exp, target, false);
15634 break;
15636 default:
15637 break;
15638 /* Fall through to the expanders below. */
15641 /* Expand abs* operations. */
15642 d = bdesc_abs;
15643 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15644 if (d->code == fcode)
15645 return altivec_expand_abs_builtin (d->icode, exp, target);
15647 /* Expand the AltiVec predicates. */
15648 d = bdesc_altivec_preds;
15649 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15650 if (d->code == fcode)
15651 return altivec_expand_predicate_builtin (d->icode, exp, target);
15653 /* LV* are funky; they were initialized differently, so expand them specially here. */
15654 switch (fcode)
15656 case ALTIVEC_BUILTIN_LVSL:
15657 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15658 exp, target, false);
15659 case ALTIVEC_BUILTIN_LVSR:
15660 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15661 exp, target, false);
15662 case ALTIVEC_BUILTIN_LVEBX:
15663 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15664 exp, target, false);
15665 case ALTIVEC_BUILTIN_LVEHX:
15666 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15667 exp, target, false);
15668 case ALTIVEC_BUILTIN_LVEWX:
15669 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15670 exp, target, false);
15671 case ALTIVEC_BUILTIN_LVXL_V2DF:
15672 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15673 exp, target, false);
15674 case ALTIVEC_BUILTIN_LVXL_V2DI:
15675 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15676 exp, target, false);
15677 case ALTIVEC_BUILTIN_LVXL_V4SF:
15678 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15679 exp, target, false);
15680 case ALTIVEC_BUILTIN_LVXL:
15681 case ALTIVEC_BUILTIN_LVXL_V4SI:
15682 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15683 exp, target, false);
15684 case ALTIVEC_BUILTIN_LVXL_V8HI:
15685 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15686 exp, target, false);
15687 case ALTIVEC_BUILTIN_LVXL_V16QI:
15688 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15689 exp, target, false);
15690 case ALTIVEC_BUILTIN_LVX_V1TI:
15691 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15692 exp, target, false);
15693 case ALTIVEC_BUILTIN_LVX_V2DF:
15694 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15695 exp, target, false);
15696 case ALTIVEC_BUILTIN_LVX_V2DI:
15697 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15698 exp, target, false);
15699 case ALTIVEC_BUILTIN_LVX_V4SF:
15700 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15701 exp, target, false);
15702 case ALTIVEC_BUILTIN_LVX:
15703 case ALTIVEC_BUILTIN_LVX_V4SI:
15704 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15705 exp, target, false);
15706 case ALTIVEC_BUILTIN_LVX_V8HI:
15707 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15708 exp, target, false);
15709 case ALTIVEC_BUILTIN_LVX_V16QI:
15710 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15711 exp, target, false);
15712 case ALTIVEC_BUILTIN_LVLX:
15713 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15714 exp, target, true);
15715 case ALTIVEC_BUILTIN_LVLXL:
15716 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15717 exp, target, true);
15718 case ALTIVEC_BUILTIN_LVRX:
15719 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15720 exp, target, true);
15721 case ALTIVEC_BUILTIN_LVRXL:
15722 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15723 exp, target, true);
15724 case VSX_BUILTIN_LXVD2X_V1TI:
15725 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15726 exp, target, false);
15727 case VSX_BUILTIN_LXVD2X_V2DF:
15728 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15729 exp, target, false);
15730 case VSX_BUILTIN_LXVD2X_V2DI:
15731 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15732 exp, target, false);
15733 case VSX_BUILTIN_LXVW4X_V4SF:
15734 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15735 exp, target, false);
15736 case VSX_BUILTIN_LXVW4X_V4SI:
15737 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15738 exp, target, false);
15739 case VSX_BUILTIN_LXVW4X_V8HI:
15740 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15741 exp, target, false);
15742 case VSX_BUILTIN_LXVW4X_V16QI:
15743 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15744 exp, target, false);
15745 /* For the following on big endian, it's ok to use any appropriate
15746 unaligned-supporting load, so use a generic expander. For
15747 little-endian, the exact element-reversing instruction must
15748 be used. */
15749 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15751 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15752 : CODE_FOR_vsx_ld_elemrev_v2df);
15753 return altivec_expand_lv_builtin (code, exp, target, false);
15755 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15757 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15758 : CODE_FOR_vsx_ld_elemrev_v1ti);
15759 return altivec_expand_lv_builtin (code, exp, target, false);
15761 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15763 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15764 : CODE_FOR_vsx_ld_elemrev_v2di);
15765 return altivec_expand_lv_builtin (code, exp, target, false);
15767 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15769 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15770 : CODE_FOR_vsx_ld_elemrev_v4sf);
15771 return altivec_expand_lv_builtin (code, exp, target, false);
15773 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15775 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15776 : CODE_FOR_vsx_ld_elemrev_v4si);
15777 return altivec_expand_lv_builtin (code, exp, target, false);
15779 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15781 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15782 : CODE_FOR_vsx_ld_elemrev_v8hi);
15783 return altivec_expand_lv_builtin (code, exp, target, false);
15785 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15787 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15788 : CODE_FOR_vsx_ld_elemrev_v16qi);
15789 return altivec_expand_lv_builtin (code, exp, target, false);
15791 break;
15792 default:
15793 break;
15794 /* Fall through to the not-expanded return below. */
15797 *expandedp = false;
15798 return NULL_RTX;
15801 /* Expand the builtin in EXP and store the result in TARGET. Store
15802 true in *EXPANDEDP if we found a builtin to expand. */
15803 static rtx
15804 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15806 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15807 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15808 const struct builtin_description *d;
15809 size_t i;
15811 *expandedp = true;
15813 switch (fcode)
15815 case PAIRED_BUILTIN_STX:
15816 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15817 case PAIRED_BUILTIN_LX:
15818 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15819 default:
15820 break;
15821 /* Fall through to the predicate expanders below. */
15824 /* Expand the paired predicates. */
15825 d = bdesc_paired_preds;
15826 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15827 if (d->code == fcode)
15828 return paired_expand_predicate_builtin (d->icode, exp, target);
15830 *expandedp = false;
15831 return NULL_RTX;
15834 static rtx
15835 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15837 rtx pat, scratch, tmp;
15838 tree form = CALL_EXPR_ARG (exp, 0);
15839 tree arg0 = CALL_EXPR_ARG (exp, 1);
15840 tree arg1 = CALL_EXPR_ARG (exp, 2);
15841 rtx op0 = expand_normal (arg0);
15842 rtx op1 = expand_normal (arg1);
15843 machine_mode mode0 = insn_data[icode].operand[1].mode;
15844 machine_mode mode1 = insn_data[icode].operand[2].mode;
15845 int form_int;
15846 enum rtx_code code;
15848 if (TREE_CODE (form) != INTEGER_CST)
15850 error ("argument 1 of %s must be a constant",
15851 "__builtin_paired_predicate");
15852 return const0_rtx;
15854 else
15855 form_int = TREE_INT_CST_LOW (form);
15857 gcc_assert (mode0 == mode1);
15859 if (arg0 == error_mark_node || arg1 == error_mark_node)
15860 return const0_rtx;
15862 if (target == 0
15863 || GET_MODE (target) != SImode
15864 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15865 target = gen_reg_rtx (SImode);
15866 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15867 op0 = copy_to_mode_reg (mode0, op0);
15868 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15869 op1 = copy_to_mode_reg (mode1, op1);
15871 scratch = gen_reg_rtx (CCFPmode);
15873 pat = GEN_FCN (icode) (scratch, op0, op1);
15874 if (!pat)
15875 return const0_rtx;
15877 emit_insn (pat);
15879 switch (form_int)
15881 /* LT bit. */
15882 case 0:
15883 code = LT;
15884 break;
15885 /* GT bit. */
15886 case 1:
15887 code = GT;
15888 break;
15889 /* EQ bit. */
15890 case 2:
15891 code = EQ;
15892 break;
15893 /* UN bit. */
15894 case 3:
15895 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15896 return target;
15897 default:
15898 error ("argument 1 of %qs is out of range",
15899 "__builtin_paired_predicate");
15900 return const0_rtx;
15903 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15904 emit_move_insn (target, tmp);
15905 return target;
15908 /* Raise an error message for a builtin function that is called without the
15909 appropriate target options being set. */
15911 static void
15912 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15914 size_t uns_fncode = (size_t) fncode;
15915 const char *name = rs6000_builtin_info[uns_fncode].name;
15916 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15918 gcc_assert (name != NULL);
15919 if ((fnmask & RS6000_BTM_CELL) != 0)
15920 error ("builtin function %qs is only valid for the cell processor", name);
15921 else if ((fnmask & RS6000_BTM_VSX) != 0)
15922 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15923 else if ((fnmask & RS6000_BTM_HTM) != 0)
15924 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15925 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15926 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15927 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
15928 error ("builtin function %qs requires the %qs option", name, "-mpaired");
15929 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15930 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15931 error ("builtin function %qs requires the %qs and %qs options",
15932 name, "-mhard-dfp", "-mpower8-vector");
15933 else if ((fnmask & RS6000_BTM_DFP) != 0)
15934 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15935 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15936 error ("builtin function %qs requires the %qs option", name,
15937 "-mpower8-vector");
15938 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15939 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15940 error ("builtin function %qs requires the %qs and %qs options",
15941 name, "-mcpu=power9", "-m64");
15942 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15943 error ("builtin function %qs requires the %qs option", name,
15944 "-mcpu=power9");
15945 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15946 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15947 error ("builtin function %qs requires the %qs and %qs options",
15948 name, "-mcpu=power9", "-m64");
15949 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15950 error ("builtin function %qs requires the %qs option", name,
15951 "-mcpu=power9");
15952 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15953 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15954 error ("builtin function %qs requires the %qs and %qs options",
15955 name, "-mhard-float", "-mlong-double-128");
15956 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15957 error ("builtin function %qs requires the %qs option", name,
15958 "-mhard-float");
15959 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15960 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15961 name);
15962 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15963 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15964 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15965 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15966 error ("builtin function %qs requires the %qs (or newer), and "
15967 "%qs or %qs options",
15968 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15969 else
15970 error ("builtin function %qs is not supported with the current options",
15971 name);
15974 /* Target hook for early folding of built-ins, shamelessly stolen
15975 from ia64.c. */
15977 static tree
15978 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15979 int n_args ATTRIBUTE_UNUSED,
15980 tree *args ATTRIBUTE_UNUSED,
15981 bool ignore ATTRIBUTE_UNUSED)
15983 #ifdef SUBTARGET_FOLD_BUILTIN
15984 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15985 #else
15986 return NULL_TREE;
15987 #endif
15990 /* Helper function to sort out which built-ins may be valid without having
15991 a LHS. */
15992 static bool
15993 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15995 switch (fn_code)
15997 case ALTIVEC_BUILTIN_STVX_V16QI:
15998 case ALTIVEC_BUILTIN_STVX_V8HI:
15999 case ALTIVEC_BUILTIN_STVX_V4SI:
16000 case ALTIVEC_BUILTIN_STVX_V4SF:
16001 case ALTIVEC_BUILTIN_STVX_V2DI:
16002 case ALTIVEC_BUILTIN_STVX_V2DF:
16003 return true;
16004 default:
16005 return false;
16009 /* Helper function to handle the gimple folding of a vector compare
16010 operation. This sets up true/false vectors, and uses the
16011 VEC_COND_EXPR operation.
16012 CODE indicates which comparison is to be made. (EQ, GT, ...).
16013 TYPE indicates the type of the result. */
16014 static tree
16015 fold_build_vec_cmp (tree_code code, tree type,
16016 tree arg0, tree arg1)
16018 tree cmp_type = build_same_sized_truth_vector_type (type);
16019 tree zero_vec = build_zero_cst (type);
16020 tree minus_one_vec = build_minus_one_cst (type);
16021 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
16022 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
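/* Illustrative GIMPLE-level view: folding vec_cmpeq (a, b) on V4SI
   builds roughly

     cmp = a == b;
     lhs = VEC_COND_EXPR <cmp, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;

   so each element becomes all-ones where the compare holds and zero
   elsewhere.  */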
16025 /* Helper function to handle the in-between steps for the
16026 vector compare built-ins. */
16027 static void
16028 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
16030 tree arg0 = gimple_call_arg (stmt, 0);
16031 tree arg1 = gimple_call_arg (stmt, 1);
16032 tree lhs = gimple_call_lhs (stmt);
16033 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
16034 gimple *g = gimple_build_assign (lhs, cmp);
16035 gimple_set_location (g, gimple_location (stmt));
16036 gsi_replace (gsi, g, true);
16039 /* Helper function to handle the vector merge[hl] built-ins. The
16040 implementation difference between the h and l versions is in the
16041 values used when building the permute vector for high-word versus
16042 low-word merge; the variance is keyed off the use_high parameter. */
16043 static void
16044 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
16046 tree arg0 = gimple_call_arg (stmt, 0);
16047 tree arg1 = gimple_call_arg (stmt, 1);
16048 tree lhs = gimple_call_lhs (stmt);
16049 tree lhs_type = TREE_TYPE (lhs);
16050 tree lhs_type_type = TREE_TYPE (lhs_type);
16051 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
16052 int midpoint = n_elts / 2;
16053 int offset = 0;
16055 if (use_high == 1)
16056 offset = midpoint;
16058 tree_vector_builder elts (lhs_type, VECTOR_CST_NELTS (arg0), 1);
16060 for (int i = 0; i < midpoint; i++)
16062 elts.safe_push (build_int_cst (lhs_type_type, offset + i));
16063 elts.safe_push (build_int_cst (lhs_type_type, offset + n_elts + i));
16066 tree permute = elts.build ();
16068 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
16069 gimple_set_location (g, gimple_location (stmt));
16070 gsi_replace (gsi, g, true);
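/* Worked example, derived from the loop above: for a V4SI merge,
   midpoint is 2, so offset 0 yields the permute selector
   { 0, 4, 1, 5 } and offset == midpoint yields { 2, 6, 3, 7 },
   where indices 0..3 pick elements of ARG0 and 4..7 pick elements
   of ARG1.  */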
16073 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16074 a constant, use rs6000_fold_builtin.) */
16076 bool
16077 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16079 gimple *stmt = gsi_stmt (*gsi);
16080 tree fndecl = gimple_call_fndecl (stmt);
16081 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16082 enum rs6000_builtins fn_code
16083 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16084 tree arg0, arg1, lhs, temp;
16085 gimple *g;
16087 size_t uns_fncode = (size_t) fn_code;
16088 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16089 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16090 const char *fn_name2 = (icode != CODE_FOR_nothing)
16091 ? get_insn_name ((int) icode)
16092 : "nothing";
16094 if (TARGET_DEBUG_BUILTIN)
16095 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16096 fn_code, fn_name1, fn_name2);
16098 if (!rs6000_fold_gimple)
16099 return false;
16101 /* Prevent gimple folding for code that does not have a LHS, unless it is
16102 allowed per the rs6000_builtin_valid_without_lhs helper function. */
16103 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
16104 return false;
16106 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
16107 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
16108 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
16109 if (!func_valid_p)
16110 return false;
16112 switch (fn_code)
16114 /* Flavors of vec_add. We deliberately don't expand
16115 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16116 TImode, resulting in much poorer code generation. */
16117 case ALTIVEC_BUILTIN_VADDUBM:
16118 case ALTIVEC_BUILTIN_VADDUHM:
16119 case ALTIVEC_BUILTIN_VADDUWM:
16120 case P8V_BUILTIN_VADDUDM:
16121 case ALTIVEC_BUILTIN_VADDFP:
16122 case VSX_BUILTIN_XVADDDP:
16123 arg0 = gimple_call_arg (stmt, 0);
16124 arg1 = gimple_call_arg (stmt, 1);
16125 lhs = gimple_call_lhs (stmt);
16126 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16127 gimple_set_location (g, gimple_location (stmt));
16128 gsi_replace (gsi, g, true);
16129 return true;
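/* E.g. (illustrative): "lhs = __builtin_altivec_vadduwm (a, b)" is
   replaced by the plain GIMPLE statement "lhs = a + b;", which later
   passes can vectorize, simplify, or combine like any other add.  */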
16130 /* Flavors of vec_sub. We deliberately don't expand
16131 P8V_BUILTIN_VSUBUQM. */
16132 case ALTIVEC_BUILTIN_VSUBUBM:
16133 case ALTIVEC_BUILTIN_VSUBUHM:
16134 case ALTIVEC_BUILTIN_VSUBUWM:
16135 case P8V_BUILTIN_VSUBUDM:
16136 case ALTIVEC_BUILTIN_VSUBFP:
16137 case VSX_BUILTIN_XVSUBDP:
16138 arg0 = gimple_call_arg (stmt, 0);
16139 arg1 = gimple_call_arg (stmt, 1);
16140 lhs = gimple_call_lhs (stmt);
16141 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16142 gimple_set_location (g, gimple_location (stmt));
16143 gsi_replace (gsi, g, true);
16144 return true;
16145 case VSX_BUILTIN_XVMULSP:
16146 case VSX_BUILTIN_XVMULDP:
16147 arg0 = gimple_call_arg (stmt, 0);
16148 arg1 = gimple_call_arg (stmt, 1);
16149 lhs = gimple_call_lhs (stmt);
16150 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16151 gimple_set_location (g, gimple_location (stmt));
16152 gsi_replace (gsi, g, true);
16153 return true;
16154 /* Even element flavors of vec_mul (signed). */
16155 case ALTIVEC_BUILTIN_VMULESB:
16156 case ALTIVEC_BUILTIN_VMULESH:
16157 case P8V_BUILTIN_VMULESW:
16158 /* Even element flavors of vec_mul (unsigned). */
16159 case ALTIVEC_BUILTIN_VMULEUB:
16160 case ALTIVEC_BUILTIN_VMULEUH:
16161 case P8V_BUILTIN_VMULEUW:
16162 arg0 = gimple_call_arg (stmt, 0);
16163 arg1 = gimple_call_arg (stmt, 1);
16164 lhs = gimple_call_lhs (stmt);
16165 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16166 gimple_set_location (g, gimple_location (stmt));
16167 gsi_replace (gsi, g, true);
16168 return true;
16169 /* Odd element flavors of vec_mul (signed). */
16170 case ALTIVEC_BUILTIN_VMULOSB:
16171 case ALTIVEC_BUILTIN_VMULOSH:
16172 case P8V_BUILTIN_VMULOSW:
16173 /* Odd element flavors of vec_mul (unsigned). */
16174 case ALTIVEC_BUILTIN_VMULOUB:
16175 case ALTIVEC_BUILTIN_VMULOUH:
16176 case P8V_BUILTIN_VMULOUW:
16177 arg0 = gimple_call_arg (stmt, 0);
16178 arg1 = gimple_call_arg (stmt, 1);
16179 lhs = gimple_call_lhs (stmt);
16180 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16181 gimple_set_location (g, gimple_location (stmt));
16182 gsi_replace (gsi, g, true);
16183 return true;
16184 /* Flavors of vec_div (Integer). */
16185 case VSX_BUILTIN_DIV_V2DI:
16186 case VSX_BUILTIN_UDIV_V2DI:
16187 arg0 = gimple_call_arg (stmt, 0);
16188 arg1 = gimple_call_arg (stmt, 1);
16189 lhs = gimple_call_lhs (stmt);
16190 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16191 gimple_set_location (g, gimple_location (stmt));
16192 gsi_replace (gsi, g, true);
16193 return true;
16194 /* Flavors of vec_div (float). */
16195 case VSX_BUILTIN_XVDIVSP:
16196 case VSX_BUILTIN_XVDIVDP:
16197 arg0 = gimple_call_arg (stmt, 0);
16198 arg1 = gimple_call_arg (stmt, 1);
16199 lhs = gimple_call_lhs (stmt);
16200 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16201 gimple_set_location (g, gimple_location (stmt));
16202 gsi_replace (gsi, g, true);
16203 return true;
16204 /* Flavors of vec_and. */
16205 case ALTIVEC_BUILTIN_VAND:
16206 arg0 = gimple_call_arg (stmt, 0);
16207 arg1 = gimple_call_arg (stmt, 1);
16208 lhs = gimple_call_lhs (stmt);
16209 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16210 gimple_set_location (g, gimple_location (stmt));
16211 gsi_replace (gsi, g, true);
16212 return true;
16213 /* Flavors of vec_andc. */
16214 case ALTIVEC_BUILTIN_VANDC:
16215 arg0 = gimple_call_arg (stmt, 0);
16216 arg1 = gimple_call_arg (stmt, 1);
16217 lhs = gimple_call_lhs (stmt);
16218 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16219 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16220 gimple_set_location (g, gimple_location (stmt));
16221 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16222 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16223 gimple_set_location (g, gimple_location (stmt));
16224 gsi_replace (gsi, g, true);
16225 return true;
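	/* Editor's sketch, not part of the original source: a call such as
	   vec_andc (a, b) has no single-statement GIMPLE form, so it is
	   split into

	     temp = ~b;
	     lhs = a & temp;

	   with the BIT_NOT_EXPR statement inserted ahead of the replaced
	   call.  The nand, orc, nor and eqv cases below use the same
	   two-statement pattern.  */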
16226 /* Flavors of vec_nand. */
16227 case P8V_BUILTIN_VEC_NAND:
16228 case P8V_BUILTIN_NAND_V16QI:
16229 case P8V_BUILTIN_NAND_V8HI:
16230 case P8V_BUILTIN_NAND_V4SI:
16231 case P8V_BUILTIN_NAND_V4SF:
16232 case P8V_BUILTIN_NAND_V2DF:
16233 case P8V_BUILTIN_NAND_V2DI:
16234 arg0 = gimple_call_arg (stmt, 0);
16235 arg1 = gimple_call_arg (stmt, 1);
16236 lhs = gimple_call_lhs (stmt);
16237 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16238 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16239 gimple_set_location (g, gimple_location (stmt));
16240 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16241 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16242 gimple_set_location (g, gimple_location (stmt));
16243 gsi_replace (gsi, g, true);
16244 return true;
16245 /* Flavors of vec_or. */
16246 case ALTIVEC_BUILTIN_VOR:
16247 arg0 = gimple_call_arg (stmt, 0);
16248 arg1 = gimple_call_arg (stmt, 1);
16249 lhs = gimple_call_lhs (stmt);
16250 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16251 gimple_set_location (g, gimple_location (stmt));
16252 gsi_replace (gsi, g, true);
16253 return true;
16254 /* Flavors of vec_orc. */
16255 case P8V_BUILTIN_ORC_V16QI:
16256 case P8V_BUILTIN_ORC_V8HI:
16257 case P8V_BUILTIN_ORC_V4SI:
16258 case P8V_BUILTIN_ORC_V4SF:
16259 case P8V_BUILTIN_ORC_V2DF:
16260 case P8V_BUILTIN_ORC_V2DI:
16261 arg0 = gimple_call_arg (stmt, 0);
16262 arg1 = gimple_call_arg (stmt, 1);
16263 lhs = gimple_call_lhs (stmt);
16264 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16265 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16266 gimple_set_location (g, gimple_location (stmt));
16267 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16268 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16269 gimple_set_location (g, gimple_location (stmt));
16270 gsi_replace (gsi, g, true);
16271 return true;
16272 /* Flavors of vec_xor. */
16273 case ALTIVEC_BUILTIN_VXOR:
16274 arg0 = gimple_call_arg (stmt, 0);
16275 arg1 = gimple_call_arg (stmt, 1);
16276 lhs = gimple_call_lhs (stmt);
16277 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16278 gimple_set_location (g, gimple_location (stmt));
16279 gsi_replace (gsi, g, true);
16280 return true;
16281 /* Flavors of vec_nor. */
16282 case ALTIVEC_BUILTIN_VNOR:
16283 arg0 = gimple_call_arg (stmt, 0);
16284 arg1 = gimple_call_arg (stmt, 1);
16285 lhs = gimple_call_lhs (stmt);
16286 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16287 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16288 gimple_set_location (g, gimple_location (stmt));
16289 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16290 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16291 gimple_set_location (g, gimple_location (stmt));
16292 gsi_replace (gsi, g, true);
16293 return true;
16294 /* Flavors of vec_abs. */
16295 case ALTIVEC_BUILTIN_ABS_V16QI:
16296 case ALTIVEC_BUILTIN_ABS_V8HI:
16297 case ALTIVEC_BUILTIN_ABS_V4SI:
16298 case ALTIVEC_BUILTIN_ABS_V4SF:
16299 case P8V_BUILTIN_ABS_V2DI:
16300 case VSX_BUILTIN_XVABSDP:
16301 arg0 = gimple_call_arg (stmt, 0);
16302 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16303 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16304 return false;
16305 lhs = gimple_call_lhs (stmt);
16306 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16307 gimple_set_location (g, gimple_location (stmt));
16308 gsi_replace (gsi, g, true);
16309 return true;
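	/* Editor's note, not part of the original source: the guard above
	   appears to decline folding integer vec_abs when signed overflow
	   is undefined for the element type, since ABS_EXPR of the
	   most-negative element would introduce undefined overflow that
	   the builtin itself does not have; the floating-point variants
	   always fold.  */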
16310 /* Flavors of vec_min. */
16311 case VSX_BUILTIN_XVMINDP:
16312 case P8V_BUILTIN_VMINSD:
16313 case P8V_BUILTIN_VMINUD:
16314 case ALTIVEC_BUILTIN_VMINSB:
16315 case ALTIVEC_BUILTIN_VMINSH:
16316 case ALTIVEC_BUILTIN_VMINSW:
16317 case ALTIVEC_BUILTIN_VMINUB:
16318 case ALTIVEC_BUILTIN_VMINUH:
16319 case ALTIVEC_BUILTIN_VMINUW:
16320 case ALTIVEC_BUILTIN_VMINFP:
16321 arg0 = gimple_call_arg (stmt, 0);
16322 arg1 = gimple_call_arg (stmt, 1);
16323 lhs = gimple_call_lhs (stmt);
16324 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16325 gimple_set_location (g, gimple_location (stmt));
16326 gsi_replace (gsi, g, true);
16327 return true;
16328 /* Flavors of vec_max. */
16329 case VSX_BUILTIN_XVMAXDP:
16330 case P8V_BUILTIN_VMAXSD:
16331 case P8V_BUILTIN_VMAXUD:
16332 case ALTIVEC_BUILTIN_VMAXSB:
16333 case ALTIVEC_BUILTIN_VMAXSH:
16334 case ALTIVEC_BUILTIN_VMAXSW:
16335 case ALTIVEC_BUILTIN_VMAXUB:
16336 case ALTIVEC_BUILTIN_VMAXUH:
16337 case ALTIVEC_BUILTIN_VMAXUW:
16338 case ALTIVEC_BUILTIN_VMAXFP:
16339 arg0 = gimple_call_arg (stmt, 0);
16340 arg1 = gimple_call_arg (stmt, 1);
16341 lhs = gimple_call_lhs (stmt);
16342 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16343 gimple_set_location (g, gimple_location (stmt));
16344 gsi_replace (gsi, g, true);
16345 return true;
16346 /* Flavors of vec_eqv. */
16347 case P8V_BUILTIN_EQV_V16QI:
16348 case P8V_BUILTIN_EQV_V8HI:
16349 case P8V_BUILTIN_EQV_V4SI:
16350 case P8V_BUILTIN_EQV_V4SF:
16351 case P8V_BUILTIN_EQV_V2DF:
16352 case P8V_BUILTIN_EQV_V2DI:
16353 arg0 = gimple_call_arg (stmt, 0);
16354 arg1 = gimple_call_arg (stmt, 1);
16355 lhs = gimple_call_lhs (stmt);
16356 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16357 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16358 gimple_set_location (g, gimple_location (stmt));
16359 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16360 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16361 gimple_set_location (g, gimple_location (stmt));
16362 gsi_replace (gsi, g, true);
16363 return true;
16364 /* Flavors of vec_rotate_left. */
16365 case ALTIVEC_BUILTIN_VRLB:
16366 case ALTIVEC_BUILTIN_VRLH:
16367 case ALTIVEC_BUILTIN_VRLW:
16368 case P8V_BUILTIN_VRLD:
16369 arg0 = gimple_call_arg (stmt, 0);
16370 arg1 = gimple_call_arg (stmt, 1);
16371 lhs = gimple_call_lhs (stmt);
16372 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16373 gimple_set_location (g, gimple_location (stmt));
16374 gsi_replace (gsi, g, true);
16375 return true;
16376 /* Flavors of vector shift right algebraic.
16377 vec_sra{b,h,w} -> vsra{b,h,w}. */
16378 case ALTIVEC_BUILTIN_VSRAB:
16379 case ALTIVEC_BUILTIN_VSRAH:
16380 case ALTIVEC_BUILTIN_VSRAW:
16381 case P8V_BUILTIN_VSRAD:
16382 arg0 = gimple_call_arg (stmt, 0);
16383 arg1 = gimple_call_arg (stmt, 1);
16384 lhs = gimple_call_lhs (stmt);
16385 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16386 gimple_set_location (g, gimple_location (stmt));
16387 gsi_replace (gsi, g, true);
16388 return true;
16389 /* Flavors of vector shift left.
16390 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16391 case ALTIVEC_BUILTIN_VSLB:
16392 case ALTIVEC_BUILTIN_VSLH:
16393 case ALTIVEC_BUILTIN_VSLW:
16394 case P8V_BUILTIN_VSLD:
16395 arg0 = gimple_call_arg (stmt, 0);
16396 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16397 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16398 return false;
16399 arg1 = gimple_call_arg (stmt, 1);
16400 lhs = gimple_call_lhs (stmt);
16401 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16402 gimple_set_location (g, gimple_location (stmt));
16403 gsi_replace (gsi, g, true);
16404 return true;
16405 /* Flavors of vector shift right. */
16406 case ALTIVEC_BUILTIN_VSRB:
16407 case ALTIVEC_BUILTIN_VSRH:
16408 case ALTIVEC_BUILTIN_VSRW:
16409 case P8V_BUILTIN_VSRD:
16411 arg0 = gimple_call_arg (stmt, 0);
16412 arg1 = gimple_call_arg (stmt, 1);
16413 lhs = gimple_call_lhs (stmt);
16414 gimple_seq stmts = NULL;
16415 /* Convert arg0 to unsigned. */
16416 tree arg0_unsigned
16417 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16418 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16419 tree res
16420 = gimple_build (&stmts, RSHIFT_EXPR,
16421 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16422 /* Convert result back to the lhs type. */
16423 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16424 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16425 update_call_from_tree (gsi, res);
16426 return true;
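	/* Editor's illustration, not part of the original source:
	   RSHIFT_EXPR on a signed operand would be an arithmetic shift,
	   so the logical vsr{b,h,w,d} forms shift an unsigned view of the
	   value instead:

	     u   = VIEW_CONVERT_EXPR<unsigned vector> (arg0);
	     r   = u >> arg1;
	     lhs = VIEW_CONVERT_EXPR<original vector> (r);

	   which is the three-statement sequence built above.  */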
16428 /* Vector loads. */
16429 case ALTIVEC_BUILTIN_LVX_V16QI:
16430 case ALTIVEC_BUILTIN_LVX_V8HI:
16431 case ALTIVEC_BUILTIN_LVX_V4SI:
16432 case ALTIVEC_BUILTIN_LVX_V4SF:
16433 case ALTIVEC_BUILTIN_LVX_V2DI:
16434 case ALTIVEC_BUILTIN_LVX_V2DF:
16435 case ALTIVEC_BUILTIN_LVX_V1TI:
16437 arg0 = gimple_call_arg (stmt, 0); // offset
16438 arg1 = gimple_call_arg (stmt, 1); // address
16439 /* Do not fold for -maltivec=be on LE targets. */
16440 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16441 return false;
16442 lhs = gimple_call_lhs (stmt);
16443 location_t loc = gimple_location (stmt);
16444 /* Since arg1 may be cast to a different type, just use ptr_type_node
16445 here instead of trying to enforce TBAA on pointer types. */
16446 tree arg1_type = ptr_type_node;
16447 tree lhs_type = TREE_TYPE (lhs);
16448 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16449 the tree using the value from arg0. The resulting type will match
16450 the type of arg1. */
16451 gimple_seq stmts = NULL;
16452 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16453 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16454 arg1_type, arg1, temp_offset);
16455 /* Mask off any lower bits from the address. */
16456 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16457 arg1_type, temp_addr,
16458 build_int_cst (arg1_type, -16));
16459 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16460 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16461 take an offset, but since we've already incorporated the offset
16462 above, here we just pass in a zero. */
16463 gimple *g
16464 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
16465 build_int_cst (arg1_type, 0)));
16466 gimple_set_location (g, loc);
16467 gsi_replace (gsi, g, true);
16468 return true;
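	/* Editor's sketch of the folded lvx address arithmetic, with
	   illustrative names only: the GIMPLE built above behaves like

	     addr    = (char *) arg1 + (sizetype) arg0;
	     aligned = addr & -16;
	     lhs     = *(vector type *) aligned;

	   mirroring the lvx instruction, which ignores the low four bits
	   of its effective address.  */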
16470 /* Vector stores. */
16471 case ALTIVEC_BUILTIN_STVX_V16QI:
16472 case ALTIVEC_BUILTIN_STVX_V8HI:
16473 case ALTIVEC_BUILTIN_STVX_V4SI:
16474 case ALTIVEC_BUILTIN_STVX_V4SF:
16475 case ALTIVEC_BUILTIN_STVX_V2DI:
16476 case ALTIVEC_BUILTIN_STVX_V2DF:
16478 /* Do not fold for -maltivec=be on LE targets. */
16479 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16480 return false;
16481 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16482 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16483 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16484 location_t loc = gimple_location (stmt);
16485 tree arg0_type = TREE_TYPE (arg0);
16486 /* Use ptr_type_node (no TBAA) for the arg2_type.
16487 FIXME: (Richard) "A proper fix would be to transition this type as
16488 seen from the frontend to GIMPLE, for example in a similar way we
16489 do for MEM_REFs by piggy-backing that on an extra argument, a
16490 constant zero pointer of the alias pointer type to use (which would
16491 also serve as a type indicator of the store itself). I'd use a
16492 target specific internal function for this (not sure if we can have
16493 those target specific, but I guess if it's folded away then that's
16494 fine) and get away with the overload set." */
16495 tree arg2_type = ptr_type_node;
16496 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16497 the tree using the value from arg1. The resulting type will match
16498 the type of arg2. */
16499 gimple_seq stmts = NULL;
16500 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16501 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16502 arg2_type, arg2, temp_offset);
16503 /* Mask off any lower bits from the address. */
16504 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16505 arg2_type, temp_addr,
16506 build_int_cst (arg2_type, -16));
16507 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16508 /* The desired gimple result should be similar to:
16509 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
16510 gimple *g
16511 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
16512 build_int_cst (arg2_type, 0)), arg0);
16513 gimple_set_location (g, loc);
16514 gsi_replace (gsi, g, true);
16515 return true;
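	/* Editor's note, not part of the original source: the store folding
	   mirrors the load case above, computing (arg2 + arg1) & -16 and
	   emitting MEM[aligned] = arg0, which matches stvx's silent
	   truncation of the low four address bits.  */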
16518 /* Vector Fused multiply-add (fma). */
16519 case ALTIVEC_BUILTIN_VMADDFP:
16520 case VSX_BUILTIN_XVMADDDP:
16521 case ALTIVEC_BUILTIN_VMLADDUHM:
16523 arg0 = gimple_call_arg (stmt, 0);
16524 arg1 = gimple_call_arg (stmt, 1);
16525 tree arg2 = gimple_call_arg (stmt, 2);
16526 lhs = gimple_call_lhs (stmt);
16527 gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
16528 gimple_set_location (g, gimple_location (stmt));
16529 gsi_replace (gsi, g, true);
16530 return true;
16533 /* Vector compares; EQ, NE, GE, GT, LE. */
16534 case ALTIVEC_BUILTIN_VCMPEQUB:
16535 case ALTIVEC_BUILTIN_VCMPEQUH:
16536 case ALTIVEC_BUILTIN_VCMPEQUW:
16537 case P8V_BUILTIN_VCMPEQUD:
16538 fold_compare_helper (gsi, EQ_EXPR, stmt);
16539 return true;
16541 case P9V_BUILTIN_CMPNEB:
16542 case P9V_BUILTIN_CMPNEH:
16543 case P9V_BUILTIN_CMPNEW:
16544 fold_compare_helper (gsi, NE_EXPR, stmt);
16545 return true;
16547 case VSX_BUILTIN_CMPGE_16QI:
16548 case VSX_BUILTIN_CMPGE_U16QI:
16549 case VSX_BUILTIN_CMPGE_8HI:
16550 case VSX_BUILTIN_CMPGE_U8HI:
16551 case VSX_BUILTIN_CMPGE_4SI:
16552 case VSX_BUILTIN_CMPGE_U4SI:
16553 case VSX_BUILTIN_CMPGE_2DI:
16554 case VSX_BUILTIN_CMPGE_U2DI:
16555 fold_compare_helper (gsi, GE_EXPR, stmt);
16556 return true;
16558 case ALTIVEC_BUILTIN_VCMPGTSB:
16559 case ALTIVEC_BUILTIN_VCMPGTUB:
16560 case ALTIVEC_BUILTIN_VCMPGTSH:
16561 case ALTIVEC_BUILTIN_VCMPGTUH:
16562 case ALTIVEC_BUILTIN_VCMPGTSW:
16563 case ALTIVEC_BUILTIN_VCMPGTUW:
16564 case P8V_BUILTIN_VCMPGTUD:
16565 case P8V_BUILTIN_VCMPGTSD:
16566 fold_compare_helper (gsi, GT_EXPR, stmt);
16567 return true;
16569 case VSX_BUILTIN_CMPLE_16QI:
16570 case VSX_BUILTIN_CMPLE_U16QI:
16571 case VSX_BUILTIN_CMPLE_8HI:
16572 case VSX_BUILTIN_CMPLE_U8HI:
16573 case VSX_BUILTIN_CMPLE_4SI:
16574 case VSX_BUILTIN_CMPLE_U4SI:
16575 case VSX_BUILTIN_CMPLE_2DI:
16576 case VSX_BUILTIN_CMPLE_U2DI:
16577 fold_compare_helper (gsi, LE_EXPR, stmt);
16578 return true;
16580 /* Flavors of vec_splat_[us]{8,16,32}. */
16581 case ALTIVEC_BUILTIN_VSPLTISB:
16582 case ALTIVEC_BUILTIN_VSPLTISH:
16583 case ALTIVEC_BUILTIN_VSPLTISW:
16585 arg0 = gimple_call_arg (stmt, 0);
16586 lhs = gimple_call_lhs (stmt);
16587 /* Only fold vec_splat_*() if arg0 is a constant. */
16588 if (TREE_CODE (arg0) != INTEGER_CST)
16589 return false;
16590 gimple_seq stmts = NULL;
16591 location_t loc = gimple_location (stmt);
16592 tree splat_value = gimple_convert (&stmts, loc,
16593 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16594 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16595 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16596 g = gimple_build_assign (lhs, splat_tree);
16597 gimple_set_location (g, gimple_location (stmt));
16598 gsi_replace (gsi, g, true);
16599 return true;
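	/* Editor's illustration, not part of the original source: a literal
	   call such as vec_splat_s32 (5) folds to the vector constant
	   { 5, 5, 5, 5 } via build_vector_from_val; a non-constant argument
	   is left for rs6000_expand_builtin, since vspltis{b,h,w} encode
	   only a 5-bit signed immediate.  */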
16602 /* vec_mergel (integrals). */
16603 case ALTIVEC_BUILTIN_VMRGLH:
16604 case ALTIVEC_BUILTIN_VMRGLW:
16605 case VSX_BUILTIN_XXMRGLW_4SI:
16606 case ALTIVEC_BUILTIN_VMRGLB:
16607 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16608 /* Do not fold for -maltivec=be on LE targets. */
16609 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16610 return false;
16611 fold_mergehl_helper (gsi, stmt, 1);
16612 return true;
16613 /* vec_mergeh (integrals). */
16614 case ALTIVEC_BUILTIN_VMRGHH:
16615 case ALTIVEC_BUILTIN_VMRGHW:
16616 case VSX_BUILTIN_XXMRGHW_4SI:
16617 case ALTIVEC_BUILTIN_VMRGHB:
16618 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16619 /* Do not fold for -maltivec=be on LE targets. */
16620 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16621 return false;
16622 fold_mergehl_helper (gsi, stmt, 0);
16623 return true;
16624 default:
16625 if (TARGET_DEBUG_BUILTIN)
16626 fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
16627 fn_code, fn_name1, fn_name2);
16628 break;
16631 return false;
16634 /* Expand an expression EXP that calls a built-in function,
16635 with result going to TARGET if that's convenient
16636 (and in mode MODE if that's convenient).
16637 SUBTARGET may be used as the target for computing one of EXP's operands.
16638 IGNORE is nonzero if the value is to be ignored. */
16640 static rtx
16641 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16642 machine_mode mode ATTRIBUTE_UNUSED,
16643 int ignore ATTRIBUTE_UNUSED)
16645 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16646 enum rs6000_builtins fcode
16647 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16648 size_t uns_fcode = (size_t)fcode;
16649 const struct builtin_description *d;
16650 size_t i;
16651 rtx ret;
16652 bool success;
16653 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16654 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16655 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16657 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16658 floating point type, depending on whether long double is the IBM extended
16659 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16660 we only define one variant of the built-in function, and switch the code
16661 when defining it, rather than defining two built-ins and using the
16662 overload table in rs6000-c.c to switch between the two. If we don't have
16663 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16664 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16665 #ifdef HAVE_AS_POWER9
16666 if (FLOAT128_IEEE_P (TFmode))
16667 switch (icode)
16669 default:
16670 break;
16672 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16673 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16674 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16675 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16676 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16677 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16678 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16679 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16680 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16681 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16682 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16683 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16684 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16686 #endif
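   /* Editor's example, not part of the original source: when long double
      is IEEE 128-bit, TFmode is the IEEE mode, so a built-in registered
      with CODE_FOR_sqrtkf2_odd must instead emit CODE_FOR_sqrttf2_odd;
      the switch above performs that one-for-one remapping rather than
      defining a second set of built-ins.  */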
16688 if (TARGET_DEBUG_BUILTIN)
16690 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16691 const char *name2 = (icode != CODE_FOR_nothing)
16692 ? get_insn_name ((int) icode)
16693 : "nothing";
16694 const char *name3;
16696 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16698 default: name3 = "unknown"; break;
16699 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16700 case RS6000_BTC_UNARY: name3 = "unary"; break;
16701 case RS6000_BTC_BINARY: name3 = "binary"; break;
16702 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16703 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16704 case RS6000_BTC_ABS: name3 = "abs"; break;
16705 case RS6000_BTC_DST: name3 = "dst"; break;
16709 fprintf (stderr,
16710 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16711 (name1) ? name1 : "---", fcode,
16712 (name2) ? name2 : "---", (int) icode,
16713 name3,
16714 func_valid_p ? "" : ", not valid");
16717 if (!func_valid_p)
16719 rs6000_invalid_builtin (fcode);
16721 /* Since the builtin is invalid, just generate a normal call. */
16722 return expand_call (exp, target, ignore);
16725 switch (fcode)
16727 case RS6000_BUILTIN_RECIP:
16728 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16730 case RS6000_BUILTIN_RECIPF:
16731 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16733 case RS6000_BUILTIN_RSQRTF:
16734 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16736 case RS6000_BUILTIN_RSQRT:
16737 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16739 case POWER7_BUILTIN_BPERMD:
16740 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16741 ? CODE_FOR_bpermd_di
16742 : CODE_FOR_bpermd_si), exp, target);
16744 case RS6000_BUILTIN_GET_TB:
16745 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16746 target);
16748 case RS6000_BUILTIN_MFTB:
16749 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16750 ? CODE_FOR_rs6000_mftb_di
16751 : CODE_FOR_rs6000_mftb_si),
16752 target);
16754 case RS6000_BUILTIN_MFFS:
16755 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16757 case RS6000_BUILTIN_MTFSF:
16758 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16760 case RS6000_BUILTIN_CPU_INIT:
16761 case RS6000_BUILTIN_CPU_IS:
16762 case RS6000_BUILTIN_CPU_SUPPORTS:
16763 return cpu_expand_builtin (fcode, exp, target);
16765 case MISC_BUILTIN_SPEC_BARRIER:
16767 emit_insn (gen_rs6000_speculation_barrier ());
16768 return NULL_RTX;
16771 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16772 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16774 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16775 : (int) CODE_FOR_altivec_lvsl_direct);
16776 machine_mode tmode = insn_data[icode2].operand[0].mode;
16777 machine_mode mode = insn_data[icode2].operand[1].mode;
16778 tree arg;
16779 rtx op, addr, pat;
16781 gcc_assert (TARGET_ALTIVEC);
16783 arg = CALL_EXPR_ARG (exp, 0);
16784 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16785 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16786 addr = memory_address (mode, op);
16787 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16788 op = addr;
16789 else
16791 /* For the load case we need to negate the address. */
16792 op = gen_reg_rtx (GET_MODE (addr));
16793 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16795 op = gen_rtx_MEM (mode, op);
16797 if (target == 0
16798 || GET_MODE (target) != tmode
16799 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16800 target = gen_reg_rtx (tmode);
16802 pat = GEN_FCN (icode2) (target, op);
16803 if (!pat)
16804 return 0;
16805 emit_insn (pat);
16807 return target;
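	/* Editor's reading, not part of the original source: negating the
	   address for the load mask lets the single lvsr (big-endian) or
	   lvsl (little-endian) pattern above produce the permute-control
	   vector that the misaligned-load realignment sequence expects;
	   only the low four bits of the effective address affect the
	   result of lvsl/lvsr.  */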
16810 case ALTIVEC_BUILTIN_VCFUX:
16811 case ALTIVEC_BUILTIN_VCFSX:
16812 case ALTIVEC_BUILTIN_VCTUXS:
16813 case ALTIVEC_BUILTIN_VCTSXS:
16814 /* FIXME: There's got to be a nicer way to handle this case than
16815 constructing a new CALL_EXPR. */
16816 if (call_expr_nargs (exp) == 1)
16818 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16819 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16821 break;
16823 default:
16824 break;
16827 if (TARGET_ALTIVEC)
16829 ret = altivec_expand_builtin (exp, target, &success);
16831 if (success)
16832 return ret;
16834 if (TARGET_PAIRED_FLOAT)
16836 ret = paired_expand_builtin (exp, target, &success);
16838 if (success)
16839 return ret;
16841 if (TARGET_HTM)
16843 ret = htm_expand_builtin (exp, target, &success);
16845 if (success)
16846 return ret;
16849 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16850 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16851 gcc_assert (attr == RS6000_BTC_UNARY
16852 || attr == RS6000_BTC_BINARY
16853 || attr == RS6000_BTC_TERNARY
16854 || attr == RS6000_BTC_SPECIAL);
16856 /* Handle simple unary operations. */
16857 d = bdesc_1arg;
16858 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16859 if (d->code == fcode)
16860 return rs6000_expand_unop_builtin (icode, exp, target);
16862 /* Handle simple binary operations. */
16863 d = bdesc_2arg;
16864 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16865 if (d->code == fcode)
16866 return rs6000_expand_binop_builtin (icode, exp, target);
16868 /* Handle simple ternary operations. */
16869 d = bdesc_3arg;
16870 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16871 if (d->code == fcode)
16872 return rs6000_expand_ternop_builtin (icode, exp, target);
16874 /* Handle simple no-argument operations. */
16875 d = bdesc_0arg;
16876 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16877 if (d->code == fcode)
16878 return rs6000_expand_zeroop_builtin (icode, target);
16880 gcc_unreachable ();
16883 /* Create a builtin vector type with a name, taking care not to give
16884 the canonical type a name. */
16886 static tree
16887 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16889 tree result = build_vector_type (elt_type, num_elts);
16891 /* Copy so we don't give the canonical type a name. */
16892 result = build_variant_type_copy (result);
16894 add_builtin_type (name, result);
16896 return result;
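   /* Editor's note, not part of the original source: callers pass the
      user-visible spelling, e.g.

	rs6000_vector_type ("__vector signed int", intSI_type_node, 4);

      so diagnostics print the AltiVec keyword form while the unnamed
      canonical vector type remains shared for type-compatibility
      checks.  */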
16899 static void
16900 rs6000_init_builtins (void)
16902 tree tdecl;
16903 tree ftype;
16904 machine_mode mode;
16906 if (TARGET_DEBUG_BUILTIN)
16907 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16908 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16909 (TARGET_ALTIVEC) ? ", altivec" : "",
16910 (TARGET_VSX) ? ", vsx" : "");
16912 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16913 V2SF_type_node = build_vector_type (float_type_node, 2);
16914 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16915 : "__vector long long",
16916 intDI_type_node, 2);
16917 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16918 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16919 intSI_type_node, 4);
16920 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16921 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16922 intHI_type_node, 8);
16923 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16924 intQI_type_node, 16);
16926 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16927 unsigned_intQI_type_node, 16);
16928 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16929 unsigned_intHI_type_node, 8);
16930 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16931 unsigned_intSI_type_node, 4);
16932 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16933 ? "__vector unsigned long"
16934 : "__vector unsigned long long",
16935 unsigned_intDI_type_node, 2);
16937 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16938 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16939 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16940 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16942 const_str_type_node
16943 = build_pointer_type (build_qualified_type (char_type_node,
16944 TYPE_QUAL_CONST));
16946 /* We use V1TI mode as a special container to hold __int128_t items that
16947 must live in VSX registers. */
16948 if (intTI_type_node)
16950 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16951 intTI_type_node, 1);
16952 unsigned_V1TI_type_node
16953 = rs6000_vector_type ("__vector unsigned __int128",
16954 unsigned_intTI_type_node, 1);
16957 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16958 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16959 'vector unsigned short'. */
16961 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16962 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16963 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16964 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16965 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16967 long_integer_type_internal_node = long_integer_type_node;
16968 long_unsigned_type_internal_node = long_unsigned_type_node;
16969 long_long_integer_type_internal_node = long_long_integer_type_node;
16970 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16971 intQI_type_internal_node = intQI_type_node;
16972 uintQI_type_internal_node = unsigned_intQI_type_node;
16973 intHI_type_internal_node = intHI_type_node;
16974 uintHI_type_internal_node = unsigned_intHI_type_node;
16975 intSI_type_internal_node = intSI_type_node;
16976 uintSI_type_internal_node = unsigned_intSI_type_node;
16977 intDI_type_internal_node = intDI_type_node;
16978 uintDI_type_internal_node = unsigned_intDI_type_node;
16979 intTI_type_internal_node = intTI_type_node;
16980 uintTI_type_internal_node = unsigned_intTI_type_node;
16981 float_type_internal_node = float_type_node;
16982 double_type_internal_node = double_type_node;
16983 long_double_type_internal_node = long_double_type_node;
16984 dfloat64_type_internal_node = dfloat64_type_node;
16985 dfloat128_type_internal_node = dfloat128_type_node;
16986 void_type_internal_node = void_type_node;
16988 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16989 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16990 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16991 format that uses a pair of doubles, depending on the switches and
16992 defaults.
16994 If we support neither 128-bit IBM double-double nor IEEE 128-bit
16995 floating point, we still need to make sure the type is non-zero, or
16996 else the self-tests fail during bootstrap.
16998 We don't register a built-in type for __ibm128 if the type is the same
16999 as long double. Instead, rs6000_cpu_cpp_builtins adds a #define
17000 mapping __ibm128 to long double.
17002 For IEEE 128-bit floating point, always create the type __ieee128. If the
17003 user used -mfloat128, rs6000-c.c will create a define from __float128 to
17004 __ieee128. */
17005 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17007 ibm128_float_type_node = make_node (REAL_TYPE);
17008 TYPE_PRECISION (ibm128_float_type_node) = 128;
17009 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17010 layout_type (ibm128_float_type_node);
17012 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17013 "__ibm128");
17015 else
17016 ibm128_float_type_node = long_double_type_node;
17018 if (TARGET_FLOAT128_TYPE)
17020 ieee128_float_type_node = float128_type_node;
17021 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17022 "__ieee128");
17025 else
17026 ieee128_float_type_node = long_double_type_node;
17028 /* Initialize the modes for builtin_function_type, mapping each machine
17029 mode to its tree type node. */
17030 builtin_mode_to_type[QImode][0] = integer_type_node;
17031 builtin_mode_to_type[HImode][0] = integer_type_node;
17032 builtin_mode_to_type[SImode][0] = intSI_type_node;
17033 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17034 builtin_mode_to_type[DImode][0] = intDI_type_node;
17035 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17036 builtin_mode_to_type[TImode][0] = intTI_type_node;
17037 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17038 builtin_mode_to_type[SFmode][0] = float_type_node;
17039 builtin_mode_to_type[DFmode][0] = double_type_node;
17040 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17041 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17042 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17043 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17044 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17045 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17046 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17047 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17048 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17049 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17050 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17051 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17052 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17053 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17054 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17055 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17056 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17057 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17058 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
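   /* Editor's illustration, not part of the original source: the table is
      indexed as builtin_mode_to_type[mode][unsigned_p], so for example
      builtin_mode_to_type[V4SImode][1] yields unsigned_V4SI_type_node,
      letting builtin_function_type construct a built-in's tree type
      directly from the machine modes of its insn pattern.  */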
17060 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17061 TYPE_NAME (bool_char_type_node) = tdecl;
17063 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17064 TYPE_NAME (bool_short_type_node) = tdecl;
17066 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17067 TYPE_NAME (bool_int_type_node) = tdecl;
17069 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17070 TYPE_NAME (pixel_type_node) = tdecl;
17072 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17073 bool_char_type_node, 16);
17074 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17075 bool_short_type_node, 8);
17076 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17077 bool_int_type_node, 4);
17078 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17079 ? "__vector __bool long"
17080 : "__vector __bool long long",
17081 bool_long_long_type_node, 2);
17082 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17083 pixel_type_node, 8);
17085 /* Paired builtins are only available if the compiler was built with the
17086 appropriate options, so create those builtins only under that
17087 compiler option. Create AltiVec and VSX builtins on machines with at
17088 least the general purpose extensions (970 and newer) to allow the use
17089 of the target attribute. */
17090 if (TARGET_PAIRED_FLOAT)
17091 paired_init_builtins ();
17092 if (TARGET_EXTRA_BUILTINS)
17093 altivec_init_builtins ();
17094 if (TARGET_HTM)
17095 htm_init_builtins ();
17097 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17098 rs6000_common_init_builtins ();
17100 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17101 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17102 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17104 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17105 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17106 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17108 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17109 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17110 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17112 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17113 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17114 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17116 mode = (TARGET_64BIT) ? DImode : SImode;
17117 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17118 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17119 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17121 ftype = build_function_type_list (unsigned_intDI_type_node,
17122 NULL_TREE);
17123 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17125 if (TARGET_64BIT)
17126 ftype = build_function_type_list (unsigned_intDI_type_node,
17127 NULL_TREE);
17128 else
17129 ftype = build_function_type_list (unsigned_intSI_type_node,
17130 NULL_TREE);
17131 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17133 ftype = build_function_type_list (double_type_node, NULL_TREE);
17134 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17136 ftype = build_function_type_list (void_type_node,
17137 intSI_type_node, double_type_node,
17138 NULL_TREE);
17139 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17141 ftype = build_function_type_list (void_type_node, NULL_TREE);
17142 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17143 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
17144 MISC_BUILTIN_SPEC_BARRIER);
17146 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17147 NULL_TREE);
17148 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17149 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
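   /* Editor's usage example, user code rather than part of this file
      (use_power9_path is a placeholder name):

	if (__builtin_cpu_is ("power9")
	    || __builtin_cpu_supports ("arch_3_00"))
	  use_power9_path ();

      both built-ins take a constant string and are expanded by
      cpu_expand_builtin against the CPU information the C library
      records at startup (see the ppc-auxv.h include above).  */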
17151 /* AIX libm provides clog as __clog. */
17152 if (TARGET_XCOFF
17153 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17154 set_user_assembler_name (tdecl, "__clog");
17156 #ifdef SUBTARGET_INIT_BUILTINS
17157 SUBTARGET_INIT_BUILTINS;
17158 #endif
17161 /* Return the rs6000 builtin decl for CODE. */
17163 static tree
17164 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17166 HOST_WIDE_INT fnmask;
17168 if (code >= RS6000_BUILTIN_COUNT)
17169 return error_mark_node;
17171 fnmask = rs6000_builtin_info[code].mask;
17172 if ((fnmask & rs6000_builtin_mask) != fnmask)
17174 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17175 return error_mark_node;
17178 return rs6000_builtin_decls[code];
17181 static void
17182 paired_init_builtins (void)
17184 const struct builtin_description *d;
17185 size_t i;
17186 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17188 tree int_ftype_int_v2sf_v2sf
17189 = build_function_type_list (integer_type_node,
17190 integer_type_node,
17191 V2SF_type_node,
17192 V2SF_type_node,
17193 NULL_TREE);
17194 tree pcfloat_type_node =
17195 build_pointer_type (build_qualified_type
17196 (float_type_node, TYPE_QUAL_CONST));
17198 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17199 long_integer_type_node,
17200 pcfloat_type_node,
17201 NULL_TREE);
17202 tree void_ftype_v2sf_long_pcfloat =
17203 build_function_type_list (void_type_node,
17204 V2SF_type_node,
17205 long_integer_type_node,
17206 pcfloat_type_node,
17207 NULL_TREE);
17210 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17211 PAIRED_BUILTIN_LX);
17214 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17215 PAIRED_BUILTIN_STX);
17217 /* Predicates. */
17218 d = bdesc_paired_preds;
17219 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17221 tree type;
17222 HOST_WIDE_INT mask = d->mask;
17224 if ((mask & builtin_mask) != mask)
17226 if (TARGET_DEBUG_BUILTIN)
17227 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17228 d->name);
17229 continue;
17232 /* Cannot define builtin if the instruction is disabled. */
17233 gcc_assert (d->icode != CODE_FOR_nothing);
17235 if (TARGET_DEBUG_BUILTIN)
17236 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17237 (int)i, get_insn_name (d->icode), (int)d->icode,
17238 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17240 switch (insn_data[d->icode].operand[1].mode)
17242 case E_V2SFmode:
17243 type = int_ftype_int_v2sf_v2sf;
17244 break;
17245 default:
17246 gcc_unreachable ();
17249 def_builtin (d->name, type, d->code);
17253 static void
17254 altivec_init_builtins (void)
17256 const struct builtin_description *d;
17257 size_t i;
17258 tree ftype;
17259 tree decl;
17260 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17262 tree pvoid_type_node = build_pointer_type (void_type_node);
17264 tree pcvoid_type_node
17265 = build_pointer_type (build_qualified_type (void_type_node,
17266 TYPE_QUAL_CONST));
17268 tree int_ftype_opaque
17269 = build_function_type_list (integer_type_node,
17270 opaque_V4SI_type_node, NULL_TREE);
17271 tree opaque_ftype_opaque
17272 = build_function_type_list (integer_type_node, NULL_TREE);
17273 tree opaque_ftype_opaque_int
17274 = build_function_type_list (opaque_V4SI_type_node,
17275 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17276 tree opaque_ftype_opaque_opaque_int
17277 = build_function_type_list (opaque_V4SI_type_node,
17278 opaque_V4SI_type_node, opaque_V4SI_type_node,
17279 integer_type_node, NULL_TREE);
17280 tree opaque_ftype_opaque_opaque_opaque
17281 = build_function_type_list (opaque_V4SI_type_node,
17282 opaque_V4SI_type_node, opaque_V4SI_type_node,
17283 opaque_V4SI_type_node, NULL_TREE);
17284 tree opaque_ftype_opaque_opaque
17285 = build_function_type_list (opaque_V4SI_type_node,
17286 opaque_V4SI_type_node, opaque_V4SI_type_node,
17287 NULL_TREE);
17288 tree int_ftype_int_opaque_opaque
17289 = build_function_type_list (integer_type_node,
17290 integer_type_node, opaque_V4SI_type_node,
17291 opaque_V4SI_type_node, NULL_TREE);
17292 tree int_ftype_int_v4si_v4si
17293 = build_function_type_list (integer_type_node,
17294 integer_type_node, V4SI_type_node,
17295 V4SI_type_node, NULL_TREE);
17296 tree int_ftype_int_v2di_v2di
17297 = build_function_type_list (integer_type_node,
17298 integer_type_node, V2DI_type_node,
17299 V2DI_type_node, NULL_TREE);
17300 tree void_ftype_v4si
17301 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17302 tree v8hi_ftype_void
17303 = build_function_type_list (V8HI_type_node, NULL_TREE);
17304 tree void_ftype_void
17305 = build_function_type_list (void_type_node, NULL_TREE);
17306 tree void_ftype_int
17307 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17309 tree opaque_ftype_long_pcvoid
17310 = build_function_type_list (opaque_V4SI_type_node,
17311 long_integer_type_node, pcvoid_type_node,
17312 NULL_TREE);
17313 tree v16qi_ftype_long_pcvoid
17314 = build_function_type_list (V16QI_type_node,
17315 long_integer_type_node, pcvoid_type_node,
17316 NULL_TREE);
17317 tree v8hi_ftype_long_pcvoid
17318 = build_function_type_list (V8HI_type_node,
17319 long_integer_type_node, pcvoid_type_node,
17320 NULL_TREE);
17321 tree v4si_ftype_long_pcvoid
17322 = build_function_type_list (V4SI_type_node,
17323 long_integer_type_node, pcvoid_type_node,
17324 NULL_TREE);
17325 tree v4sf_ftype_long_pcvoid
17326 = build_function_type_list (V4SF_type_node,
17327 long_integer_type_node, pcvoid_type_node,
17328 NULL_TREE);
17329 tree v2df_ftype_long_pcvoid
17330 = build_function_type_list (V2DF_type_node,
17331 long_integer_type_node, pcvoid_type_node,
17332 NULL_TREE);
17333 tree v2di_ftype_long_pcvoid
17334 = build_function_type_list (V2DI_type_node,
17335 long_integer_type_node, pcvoid_type_node,
17336 NULL_TREE);
17337 tree v1ti_ftype_long_pcvoid
17338 = build_function_type_list (V1TI_type_node,
17339 long_integer_type_node, pcvoid_type_node,
17340 NULL_TREE);
17342 tree void_ftype_opaque_long_pvoid
17343 = build_function_type_list (void_type_node,
17344 opaque_V4SI_type_node, long_integer_type_node,
17345 pvoid_type_node, NULL_TREE);
17346 tree void_ftype_v4si_long_pvoid
17347 = build_function_type_list (void_type_node,
17348 V4SI_type_node, long_integer_type_node,
17349 pvoid_type_node, NULL_TREE);
17350 tree void_ftype_v16qi_long_pvoid
17351 = build_function_type_list (void_type_node,
17352 V16QI_type_node, long_integer_type_node,
17353 pvoid_type_node, NULL_TREE);
17355 tree void_ftype_v16qi_pvoid_long
17356 = build_function_type_list (void_type_node,
17357 V16QI_type_node, pvoid_type_node,
17358 long_integer_type_node, NULL_TREE);
17360 tree void_ftype_v8hi_long_pvoid
17361 = build_function_type_list (void_type_node,
17362 V8HI_type_node, long_integer_type_node,
17363 pvoid_type_node, NULL_TREE);
17364 tree void_ftype_v4sf_long_pvoid
17365 = build_function_type_list (void_type_node,
17366 V4SF_type_node, long_integer_type_node,
17367 pvoid_type_node, NULL_TREE);
17368 tree void_ftype_v2df_long_pvoid
17369 = build_function_type_list (void_type_node,
17370 V2DF_type_node, long_integer_type_node,
17371 pvoid_type_node, NULL_TREE);
17372 tree void_ftype_v1ti_long_pvoid
17373 = build_function_type_list (void_type_node,
17374 V1TI_type_node, long_integer_type_node,
17375 pvoid_type_node, NULL_TREE);
17376 tree void_ftype_v2di_long_pvoid
17377 = build_function_type_list (void_type_node,
17378 V2DI_type_node, long_integer_type_node,
17379 pvoid_type_node, NULL_TREE);
17380 tree int_ftype_int_v8hi_v8hi
17381 = build_function_type_list (integer_type_node,
17382 integer_type_node, V8HI_type_node,
17383 V8HI_type_node, NULL_TREE);
17384 tree int_ftype_int_v16qi_v16qi
17385 = build_function_type_list (integer_type_node,
17386 integer_type_node, V16QI_type_node,
17387 V16QI_type_node, NULL_TREE);
17388 tree int_ftype_int_v4sf_v4sf
17389 = build_function_type_list (integer_type_node,
17390 integer_type_node, V4SF_type_node,
17391 V4SF_type_node, NULL_TREE);
17392 tree int_ftype_int_v2df_v2df
17393 = build_function_type_list (integer_type_node,
17394 integer_type_node, V2DF_type_node,
17395 V2DF_type_node, NULL_TREE);
17396 tree v2di_ftype_v2di
17397 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17398 tree v4si_ftype_v4si
17399 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17400 tree v8hi_ftype_v8hi
17401 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17402 tree v16qi_ftype_v16qi
17403 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17404 tree v4sf_ftype_v4sf
17405 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17406 tree v2df_ftype_v2df
17407 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17408 tree void_ftype_pcvoid_int_int
17409 = build_function_type_list (void_type_node,
17410 pcvoid_type_node, integer_type_node,
17411 integer_type_node, NULL_TREE);
17413 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17414 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17415 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17416 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17417 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17418 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17419 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17420 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17421 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17422 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17423 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17424 ALTIVEC_BUILTIN_LVXL_V2DF);
17425 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17426 ALTIVEC_BUILTIN_LVXL_V2DI);
17427 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17428 ALTIVEC_BUILTIN_LVXL_V4SF);
17429 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17430 ALTIVEC_BUILTIN_LVXL_V4SI);
17431 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17432 ALTIVEC_BUILTIN_LVXL_V8HI);
17433 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17434 ALTIVEC_BUILTIN_LVXL_V16QI);
17435 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17436 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17437 ALTIVEC_BUILTIN_LVX_V1TI);
17438 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17439 ALTIVEC_BUILTIN_LVX_V2DF);
17440 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17441 ALTIVEC_BUILTIN_LVX_V2DI);
17442 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17443 ALTIVEC_BUILTIN_LVX_V4SF);
17444 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17445 ALTIVEC_BUILTIN_LVX_V4SI);
17446 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17447 ALTIVEC_BUILTIN_LVX_V8HI);
17448 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17449 ALTIVEC_BUILTIN_LVX_V16QI);
17450 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17451 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17452 ALTIVEC_BUILTIN_STVX_V2DF);
17453 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17454 ALTIVEC_BUILTIN_STVX_V2DI);
17455 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17456 ALTIVEC_BUILTIN_STVX_V4SF);
17457 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17458 ALTIVEC_BUILTIN_STVX_V4SI);
17459 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17460 ALTIVEC_BUILTIN_STVX_V8HI);
17461 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17462 ALTIVEC_BUILTIN_STVX_V16QI);
17463 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17464 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17465 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17466 ALTIVEC_BUILTIN_STVXL_V2DF);
17467 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17468 ALTIVEC_BUILTIN_STVXL_V2DI);
17469 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17470 ALTIVEC_BUILTIN_STVXL_V4SF);
17471 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17472 ALTIVEC_BUILTIN_STVXL_V4SI);
17473 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17474 ALTIVEC_BUILTIN_STVXL_V8HI);
17475 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17476 ALTIVEC_BUILTIN_STVXL_V16QI);
17477 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17478 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17479 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17480 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17481 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17482 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17483 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17484 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17485 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17486 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17487 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17488 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17489 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17490 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17491 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17492 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17494 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17495 VSX_BUILTIN_LXVD2X_V2DF);
17496 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17497 VSX_BUILTIN_LXVD2X_V2DI);
17498 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17499 VSX_BUILTIN_LXVW4X_V4SF);
17500 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17501 VSX_BUILTIN_LXVW4X_V4SI);
17502 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17503 VSX_BUILTIN_LXVW4X_V8HI);
17504 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17505 VSX_BUILTIN_LXVW4X_V16QI);
17506 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17507 VSX_BUILTIN_STXVD2X_V2DF);
17508 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17509 VSX_BUILTIN_STXVD2X_V2DI);
17510 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17511 VSX_BUILTIN_STXVW4X_V4SF);
17512 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17513 VSX_BUILTIN_STXVW4X_V4SI);
17514 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17515 VSX_BUILTIN_STXVW4X_V8HI);
17516 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17517 VSX_BUILTIN_STXVW4X_V16QI);
17519 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17520 VSX_BUILTIN_LD_ELEMREV_V2DF);
17521 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17522 VSX_BUILTIN_LD_ELEMREV_V2DI);
17523 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17524 VSX_BUILTIN_LD_ELEMREV_V4SF);
17525 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17526 VSX_BUILTIN_LD_ELEMREV_V4SI);
17527 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17528 VSX_BUILTIN_LD_ELEMREV_V8HI);
17529 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17530 VSX_BUILTIN_LD_ELEMREV_V16QI);
17531 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17532 VSX_BUILTIN_ST_ELEMREV_V2DF);
17533 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17534 VSX_BUILTIN_ST_ELEMREV_V1TI);
17535 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17536 VSX_BUILTIN_ST_ELEMREV_V2DI);
17537 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17538 VSX_BUILTIN_ST_ELEMREV_V4SF);
17539 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17540 VSX_BUILTIN_ST_ELEMREV_V4SI);
17541 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17542 VSX_BUILTIN_ST_ELEMREV_V8HI);
17543 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17544 VSX_BUILTIN_ST_ELEMREV_V16QI);
17546 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17547 VSX_BUILTIN_VEC_LD);
17548 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17549 VSX_BUILTIN_VEC_ST);
17550 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17551 VSX_BUILTIN_VEC_XL);
17552 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17553 VSX_BUILTIN_VEC_XL_BE);
17554 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17555 VSX_BUILTIN_VEC_XST);
17556 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17557 VSX_BUILTIN_VEC_XST_BE);
17559 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17560 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17561 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17563 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17564 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17565 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17566 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17567 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17568 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17569 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17570 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17571 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17572 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17573 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17574 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17576 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17577 ALTIVEC_BUILTIN_VEC_ADDE);
17578 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17579 ALTIVEC_BUILTIN_VEC_ADDEC);
17580 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17581 ALTIVEC_BUILTIN_VEC_CMPNE);
17582 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17583 ALTIVEC_BUILTIN_VEC_MUL);
17584 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17585 ALTIVEC_BUILTIN_VEC_SUBE);
17586 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17587 ALTIVEC_BUILTIN_VEC_SUBEC);
17589 /* Cell builtins. */
17590 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17591 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17592 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17593 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17595 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17596 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17597 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17598 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17600 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17601 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17602 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17603 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17605 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17606 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17607 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17608 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17610 if (TARGET_P9_VECTOR)
17612 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17613 P9V_BUILTIN_STXVL);
17614 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17615 P9V_BUILTIN_XST_LEN_R);
17618 /* Add the DST variants. */
17619 d = bdesc_dst;
17620 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17622 HOST_WIDE_INT mask = d->mask;
17624 /* It is expected that these dst built-in functions may have
17625 d->icode equal to CODE_FOR_nothing. */
17626 if ((mask & builtin_mask) != mask)
17628 if (TARGET_DEBUG_BUILTIN)
17629 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17630 d->name);
17631 continue;
17633 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17636 /* Initialize the predicates. */
17637 d = bdesc_altivec_preds;
17638 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17640 machine_mode mode1;
17641 tree type;
17642 HOST_WIDE_INT mask = d->mask;
17644 if ((mask & builtin_mask) != mask)
17646 if (TARGET_DEBUG_BUILTIN)
17647 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17648 d->name);
17649 continue;
17652 if (rs6000_overloaded_builtin_p (d->code))
17653 mode1 = VOIDmode;
17654 else
17656 /* Cannot define builtin if the instruction is disabled. */
17657 gcc_assert (d->icode != CODE_FOR_nothing);
17658 mode1 = insn_data[d->icode].operand[1].mode;
17661 switch (mode1)
17663 case E_VOIDmode:
17664 type = int_ftype_int_opaque_opaque;
17665 break;
17666 case E_V2DImode:
17667 type = int_ftype_int_v2di_v2di;
17668 break;
17669 case E_V4SImode:
17670 type = int_ftype_int_v4si_v4si;
17671 break;
17672 case E_V8HImode:
17673 type = int_ftype_int_v8hi_v8hi;
17674 break;
17675 case E_V16QImode:
17676 type = int_ftype_int_v16qi_v16qi;
17677 break;
17678 case E_V4SFmode:
17679 type = int_ftype_int_v4sf_v4sf;
17680 break;
17681 case E_V2DFmode:
17682 type = int_ftype_int_v2df_v2df;
17683 break;
17684 default:
17685 gcc_unreachable ();
17688 def_builtin (d->name, type, d->code);
17691 /* Initialize the abs* operators. */
17692 d = bdesc_abs;
17693 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17695 machine_mode mode0;
17696 tree type;
17697 HOST_WIDE_INT mask = d->mask;
17699 if ((mask & builtin_mask) != mask)
17701 if (TARGET_DEBUG_BUILTIN)
17702 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17703 d->name);
17704 continue;
17707 /* Cannot define builtin if the instruction is disabled. */
17708 gcc_assert (d->icode != CODE_FOR_nothing);
17709 mode0 = insn_data[d->icode].operand[0].mode;
17711 switch (mode0)
17713 case E_V2DImode:
17714 type = v2di_ftype_v2di;
17715 break;
17716 case E_V4SImode:
17717 type = v4si_ftype_v4si;
17718 break;
17719 case E_V8HImode:
17720 type = v8hi_ftype_v8hi;
17721 break;
17722 case E_V16QImode:
17723 type = v16qi_ftype_v16qi;
17724 break;
17725 case E_V4SFmode:
17726 type = v4sf_ftype_v4sf;
17727 break;
17728 case E_V2DFmode:
17729 type = v2df_ftype_v2df;
17730 break;
17731 default:
17732 gcc_unreachable ();
17735 def_builtin (d->name, type, d->code);
17738 /* Initialize target builtin that implements
17739 targetm.vectorize.builtin_mask_for_load. */
17741 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17742 v16qi_ftype_long_pcvoid,
17743 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17744 BUILT_IN_MD, NULL, NULL_TREE);
17745 TREE_READONLY (decl) = 1;
17746 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17747 altivec_builtin_mask_for_load = decl;
17749 /* Access to the vec_init patterns. */
17750 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17751 integer_type_node, integer_type_node,
17752 integer_type_node, NULL_TREE);
17753 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17755 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17756 short_integer_type_node,
17757 short_integer_type_node,
17758 short_integer_type_node,
17759 short_integer_type_node,
17760 short_integer_type_node,
17761 short_integer_type_node,
17762 short_integer_type_node, NULL_TREE);
17763 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17765 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17766 char_type_node, char_type_node,
17767 char_type_node, char_type_node,
17768 char_type_node, char_type_node,
17769 char_type_node, char_type_node,
17770 char_type_node, char_type_node,
17771 char_type_node, char_type_node,
17772 char_type_node, char_type_node,
17773 char_type_node, NULL_TREE);
17774 def_builtin ("__builtin_vec_init_v16qi", ftype,
17775 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17777 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17778 float_type_node, float_type_node,
17779 float_type_node, NULL_TREE);
17780 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17782 /* VSX builtins. */
17783 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17784 double_type_node, NULL_TREE);
17785 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17787 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17788 intDI_type_node, NULL_TREE);
17789 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17791 /* Access to the vec_set patterns. */
17792 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17793 intSI_type_node,
17794 integer_type_node, NULL_TREE);
17795 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17797 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17798 intHI_type_node,
17799 integer_type_node, NULL_TREE);
17800 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17802 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17803 intQI_type_node,
17804 integer_type_node, NULL_TREE);
17805 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17807 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17808 float_type_node,
17809 integer_type_node, NULL_TREE);
17810 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17812 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17813 double_type_node,
17814 integer_type_node, NULL_TREE);
17815 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17817 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17818 intDI_type_node,
17819 integer_type_node, NULL_TREE);
17820 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17822 /* Access to the vec_extract patterns. */
17823 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17824 integer_type_node, NULL_TREE);
17825 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17827 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17828 integer_type_node, NULL_TREE);
17829 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17831 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17832 integer_type_node, NULL_TREE);
17833 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17835 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17836 integer_type_node, NULL_TREE);
17837 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17839 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17840 integer_type_node, NULL_TREE);
17841 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17843 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17844 integer_type_node, NULL_TREE);
17845 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17848 if (V1TI_type_node)
17850 tree v1ti_ftype_long_pcvoid
17851 = build_function_type_list (V1TI_type_node,
17852 long_integer_type_node, pcvoid_type_node,
17853 NULL_TREE);
17854 tree void_ftype_v1ti_long_pvoid
17855 = build_function_type_list (void_type_node,
17856 V1TI_type_node, long_integer_type_node,
17857 pvoid_type_node, NULL_TREE);
17858 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17859 VSX_BUILTIN_LD_ELEMREV_V1TI);
17860 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17861 VSX_BUILTIN_LXVD2X_V1TI);
17862 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17863 VSX_BUILTIN_STXVD2X_V1TI);
17864 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17865 NULL_TREE, NULL_TREE);
17866 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17867 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17868 intTI_type_node,
17869 integer_type_node, NULL_TREE);
17870 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17871 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17872 integer_type_node, NULL_TREE);
17873 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
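/* Example (illustrative sketch, not part of the GCC sources): the
   overloaded "__builtin_vec_*" entry points registered above are what
   <altivec.h> maps its generic intrinsics onto.  Assuming a compiler
   with VSX enabled (-mvsx):

     #include <altivec.h>

     vector signed int
     load_v4si (const signed int *p)
     {
       return vec_xl (0, p);      /* resolved through __builtin_vec_xl */
     }

     void
     store_v4si (vector signed int v, signed int *p)
     {
       vec_xst (v, 0, p);         /* resolved through __builtin_vec_xst */
     }

   The per-type resolution of these opaque signatures happens later, in
   the front end's overloaded-builtin hook, not here.  */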
17878 static void
17879 htm_init_builtins (void)
17881 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17882 const struct builtin_description *d;
17883 size_t i;
17885 d = bdesc_htm;
17886 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17888 tree op[MAX_HTM_OPERANDS], type;
17889 HOST_WIDE_INT mask = d->mask;
17890 unsigned attr = rs6000_builtin_info[d->code].attr;
17891 bool void_func = (attr & RS6000_BTC_VOID);
17892 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17893 int nopnds = 0;
17894 tree gpr_type_node;
17895 tree rettype;
17896 tree argtype;
17898 /* It is expected that these htm built-in functions may have
17899 d->icode equal to CODE_FOR_nothing. */
17901 if (TARGET_32BIT && TARGET_POWERPC64)
17902 gpr_type_node = long_long_unsigned_type_node;
17903 else
17904 gpr_type_node = long_unsigned_type_node;
17906 if (attr & RS6000_BTC_SPR)
17908 rettype = gpr_type_node;
17909 argtype = gpr_type_node;
17911 else if (d->code == HTM_BUILTIN_TABORTDC
17912 || d->code == HTM_BUILTIN_TABORTDCI)
17914 rettype = unsigned_type_node;
17915 argtype = gpr_type_node;
17917 else
17919 rettype = unsigned_type_node;
17920 argtype = unsigned_type_node;
17923 if ((mask & builtin_mask) != mask)
17925 if (TARGET_DEBUG_BUILTIN)
17926 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
17927 continue;
17930 if (d->name == 0)
17932 if (TARGET_DEBUG_BUILTIN)
17933 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17934 (long unsigned) i);
17935 continue;
17938 op[nopnds++] = (void_func) ? void_type_node : rettype;
17940 if (attr_args == RS6000_BTC_UNARY)
17941 op[nopnds++] = argtype;
17942 else if (attr_args == RS6000_BTC_BINARY)
17944 op[nopnds++] = argtype;
17945 op[nopnds++] = argtype;
17947 else if (attr_args == RS6000_BTC_TERNARY)
17949 op[nopnds++] = argtype;
17950 op[nopnds++] = argtype;
17951 op[nopnds++] = argtype;
17954 switch (nopnds)
17956 case 1:
17957 type = build_function_type_list (op[0], NULL_TREE);
17958 break;
17959 case 2:
17960 type = build_function_type_list (op[0], op[1], NULL_TREE);
17961 break;
17962 case 3:
17963 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17964 break;
17965 case 4:
17966 type = build_function_type_list (op[0], op[1], op[2], op[3],
17967 NULL_TREE);
17968 break;
17969 default:
17970 gcc_unreachable ();
17973 def_builtin (d->name, type, d->code);
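/* Example (illustrative sketch, not part of the GCC sources): the HTM
   builtins registered above follow the GCC manual's usage pattern.
   Assuming a target with transactional memory and -mhtm:

     #include <htmintrin.h>

     extern long counter;

     void
     bump (void)
     {
       if (__builtin_tbegin (0))
         {
           counter++;             /* runs transactionally */
           __builtin_tend (0);    /* commit */
         }
       else
         __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);  /* fallback */
     }
*/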
17977 /* Hash function for builtin functions with up to 3 arguments and a return
17978 type. */
17979 hashval_t
17980 builtin_hasher::hash (builtin_hash_struct *bh)
17982 unsigned ret = 0;
17983 int i;
17985 for (i = 0; i < 4; i++)
17987 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17988 ret = (ret * 2) + bh->uns_p[i];
17991 return ret;
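/* Added commentary (not in the original file): the loop above folds the
   four (mode, uns_p) pairs into a single value, treating each mode as a
   digit in base MAX_MACHINE_MODE and appending the signedness bit after
   each digit.  Identical signatures therefore hash identically, which is
   all builtin_hasher::equal below needs.  */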
17994 /* Compare builtin hash entries H1 and H2 for equivalence. */
17995 bool
17996 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17998 return ((p1->mode[0] == p2->mode[0])
17999 && (p1->mode[1] == p2->mode[1])
18000 && (p1->mode[2] == p2->mode[2])
18001 && (p1->mode[3] == p2->mode[3])
18002 && (p1->uns_p[0] == p2->uns_p[0])
18003 && (p1->uns_p[1] == p2->uns_p[1])
18004 && (p1->uns_p[2] == p2->uns_p[2])
18005 && (p1->uns_p[3] == p2->uns_p[3]));
18008 /* Map types for builtin functions with an explicit return type and up to 3
18009 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
18010 of the missing arguments. */
18011 static tree
18012 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18013 machine_mode mode_arg1, machine_mode mode_arg2,
18014 enum rs6000_builtins builtin, const char *name)
18016 struct builtin_hash_struct h;
18017 struct builtin_hash_struct *h2;
18018 int num_args = 3;
18019 int i;
18020 tree ret_type = NULL_TREE;
18021 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18023 /* Create builtin_hash_table. */
18024 if (builtin_hash_table == NULL)
18025 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18027 h.type = NULL_TREE;
18028 h.mode[0] = mode_ret;
18029 h.mode[1] = mode_arg0;
18030 h.mode[2] = mode_arg1;
18031 h.mode[3] = mode_arg2;
18032 h.uns_p[0] = 0;
18033 h.uns_p[1] = 0;
18034 h.uns_p[2] = 0;
18035 h.uns_p[3] = 0;
18037 /* If the built-in function produces unsigned results or takes unsigned
18038 arguments, and it is returned as a decl for the vectorizer (such as
18039 widening multiplies, permute), make sure the arguments and return value
18040 are type correct. */
18041 switch (builtin)
18043 /* unsigned 1 argument functions. */
18044 case CRYPTO_BUILTIN_VSBOX:
18045 case P8V_BUILTIN_VGBBD:
18046 case MISC_BUILTIN_CDTBCD:
18047 case MISC_BUILTIN_CBCDTD:
18048 h.uns_p[0] = 1;
18049 h.uns_p[1] = 1;
18050 break;
18052 /* unsigned 2 argument functions. */
18053 case ALTIVEC_BUILTIN_VMULEUB:
18054 case ALTIVEC_BUILTIN_VMULEUH:
18055 case P8V_BUILTIN_VMULEUW:
18056 case ALTIVEC_BUILTIN_VMULOUB:
18057 case ALTIVEC_BUILTIN_VMULOUH:
18058 case P8V_BUILTIN_VMULOUW:
18059 case CRYPTO_BUILTIN_VCIPHER:
18060 case CRYPTO_BUILTIN_VCIPHERLAST:
18061 case CRYPTO_BUILTIN_VNCIPHER:
18062 case CRYPTO_BUILTIN_VNCIPHERLAST:
18063 case CRYPTO_BUILTIN_VPMSUMB:
18064 case CRYPTO_BUILTIN_VPMSUMH:
18065 case CRYPTO_BUILTIN_VPMSUMW:
18066 case CRYPTO_BUILTIN_VPMSUMD:
18067 case CRYPTO_BUILTIN_VPMSUM:
18068 case MISC_BUILTIN_ADDG6S:
18069 case MISC_BUILTIN_DIVWEU:
18070 case MISC_BUILTIN_DIVDEU:
18071 case VSX_BUILTIN_UDIV_V2DI:
18072 case ALTIVEC_BUILTIN_VMAXUB:
18073 case ALTIVEC_BUILTIN_VMINUB:
18074 case ALTIVEC_BUILTIN_VMAXUH:
18075 case ALTIVEC_BUILTIN_VMINUH:
18076 case ALTIVEC_BUILTIN_VMAXUW:
18077 case ALTIVEC_BUILTIN_VMINUW:
18078 case P8V_BUILTIN_VMAXUD:
18079 case P8V_BUILTIN_VMINUD:
18080 h.uns_p[0] = 1;
18081 h.uns_p[1] = 1;
18082 h.uns_p[2] = 1;
18083 break;
18085 /* unsigned 3 argument functions. */
18086 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18087 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18088 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18089 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18090 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18091 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18092 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18093 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18094 case VSX_BUILTIN_VPERM_16QI_UNS:
18095 case VSX_BUILTIN_VPERM_8HI_UNS:
18096 case VSX_BUILTIN_VPERM_4SI_UNS:
18097 case VSX_BUILTIN_VPERM_2DI_UNS:
18098 case VSX_BUILTIN_XXSEL_16QI_UNS:
18099 case VSX_BUILTIN_XXSEL_8HI_UNS:
18100 case VSX_BUILTIN_XXSEL_4SI_UNS:
18101 case VSX_BUILTIN_XXSEL_2DI_UNS:
18102 case CRYPTO_BUILTIN_VPERMXOR:
18103 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18104 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18105 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18106 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18107 case CRYPTO_BUILTIN_VSHASIGMAW:
18108 case CRYPTO_BUILTIN_VSHASIGMAD:
18109 case CRYPTO_BUILTIN_VSHASIGMA:
18110 h.uns_p[0] = 1;
18111 h.uns_p[1] = 1;
18112 h.uns_p[2] = 1;
18113 h.uns_p[3] = 1;
18114 break;
18116 /* signed permute functions with unsigned char mask. */
18117 case ALTIVEC_BUILTIN_VPERM_16QI:
18118 case ALTIVEC_BUILTIN_VPERM_8HI:
18119 case ALTIVEC_BUILTIN_VPERM_4SI:
18120 case ALTIVEC_BUILTIN_VPERM_4SF:
18121 case ALTIVEC_BUILTIN_VPERM_2DI:
18122 case ALTIVEC_BUILTIN_VPERM_2DF:
18123 case VSX_BUILTIN_VPERM_16QI:
18124 case VSX_BUILTIN_VPERM_8HI:
18125 case VSX_BUILTIN_VPERM_4SI:
18126 case VSX_BUILTIN_VPERM_4SF:
18127 case VSX_BUILTIN_VPERM_2DI:
18128 case VSX_BUILTIN_VPERM_2DF:
18129 h.uns_p[3] = 1;
18130 break;
18132 /* unsigned args, signed return. */
18133 case VSX_BUILTIN_XVCVUXDSP:
18134 case VSX_BUILTIN_XVCVUXDDP_UNS:
18135 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18136 h.uns_p[1] = 1;
18137 break;
18139 /* signed args, unsigned return. */
18140 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18141 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18142 case MISC_BUILTIN_UNPACK_TD:
18143 case MISC_BUILTIN_UNPACK_V1TI:
18144 h.uns_p[0] = 1;
18145 break;
18147 /* unsigned arguments, bool return (compares). */
18148 case ALTIVEC_BUILTIN_VCMPEQUB:
18149 case ALTIVEC_BUILTIN_VCMPEQUH:
18150 case ALTIVEC_BUILTIN_VCMPEQUW:
18151 case P8V_BUILTIN_VCMPEQUD:
18152 case VSX_BUILTIN_CMPGE_U16QI:
18153 case VSX_BUILTIN_CMPGE_U8HI:
18154 case VSX_BUILTIN_CMPGE_U4SI:
18155 case VSX_BUILTIN_CMPGE_U2DI:
18156 case ALTIVEC_BUILTIN_VCMPGTUB:
18157 case ALTIVEC_BUILTIN_VCMPGTUH:
18158 case ALTIVEC_BUILTIN_VCMPGTUW:
18159 case P8V_BUILTIN_VCMPGTUD:
18160 h.uns_p[1] = 1;
18161 h.uns_p[2] = 1;
18162 break;
18164 /* unsigned arguments for 128-bit pack instructions. */
18165 case MISC_BUILTIN_PACK_TD:
18166 case MISC_BUILTIN_PACK_V1TI:
18167 h.uns_p[1] = 1;
18168 h.uns_p[2] = 1;
18169 break;
18171 /* unsigned second arguments (vector shift right). */
18172 case ALTIVEC_BUILTIN_VSRB:
18173 case ALTIVEC_BUILTIN_VSRH:
18174 case ALTIVEC_BUILTIN_VSRW:
18175 case P8V_BUILTIN_VSRD:
18176 h.uns_p[2] = 1;
18177 break;
18179 default:
18180 break;
18183 /* Figure out how many args are present. */
18184 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18185 num_args--;
18187 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18188 if (!ret_type && h.uns_p[0])
18189 ret_type = builtin_mode_to_type[h.mode[0]][0];
18191 if (!ret_type)
18192 fatal_error (input_location,
18193 "internal error: builtin function %qs had an unexpected "
18194 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18196 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18197 arg_type[i] = NULL_TREE;
18199 for (i = 0; i < num_args; i++)
18201 int m = (int) h.mode[i+1];
18202 int uns_p = h.uns_p[i+1];
18204 arg_type[i] = builtin_mode_to_type[m][uns_p];
18205 if (!arg_type[i] && uns_p)
18206 arg_type[i] = builtin_mode_to_type[m][0];
18208 if (!arg_type[i])
18209 fatal_error (input_location,
18210 "internal error: builtin function %qs, argument %d "
18211 "had unexpected argument type %qs", name, i,
18212 GET_MODE_NAME (m));
18215 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18216 if (*found == NULL)
18218 h2 = ggc_alloc<builtin_hash_struct> ();
18219 *h2 = h;
18220 *found = h2;
18222 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18223 arg_type[2], NULL_TREE);
18226 return (*found)->type;
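/* Worked example (added commentary, not in the original file): for
   ALTIVEC_BUILTIN_VMULEUB (multiply even unsigned bytes) the insn modes
   are (V8HI, V16QI, V16QI, VOID), and the switch above marks the result
   and both arguments unsigned, so the type constructed here is in effect

     vector unsigned short (vector unsigned char, vector unsigned char)

   A later request with the same modes and signedness returns the cached
   type from builtin_hash_table instead of building a new one.  */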
18229 static void
18230 rs6000_common_init_builtins (void)
18232 const struct builtin_description *d;
18233 size_t i;
18235 tree opaque_ftype_opaque = NULL_TREE;
18236 tree opaque_ftype_opaque_opaque = NULL_TREE;
18237 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18238 tree v2si_ftype = NULL_TREE;
18239 tree v2si_ftype_qi = NULL_TREE;
18240 tree v2si_ftype_v2si_qi = NULL_TREE;
18241 tree v2si_ftype_int_qi = NULL_TREE;
18242 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18244 if (TARGET_PAIRED_FLOAT)
18246 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18247 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18250 /* Paired builtins are only available if you build a compiler with the
18251 appropriate options, so only create those builtins with the appropriate
18252 compiler option. Create Altivec and VSX builtins on machines with at
18253 least the general-purpose extensions (970 and newer) to allow the use of
18254 the target attribute. */
18256 if (TARGET_EXTRA_BUILTINS)
18257 builtin_mask |= RS6000_BTM_COMMON;
18259 /* Add the ternary operators. */
18260 d = bdesc_3arg;
18261 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18263 tree type;
18264 HOST_WIDE_INT mask = d->mask;
18266 if ((mask & builtin_mask) != mask)
18268 if (TARGET_DEBUG_BUILTIN)
18269 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18270 continue;
18273 if (rs6000_overloaded_builtin_p (d->code))
18275 if (! (type = opaque_ftype_opaque_opaque_opaque))
18276 type = opaque_ftype_opaque_opaque_opaque
18277 = build_function_type_list (opaque_V4SI_type_node,
18278 opaque_V4SI_type_node,
18279 opaque_V4SI_type_node,
18280 opaque_V4SI_type_node,
18281 NULL_TREE);
18283 else
18285 enum insn_code icode = d->icode;
18286 if (d->name == 0)
18288 if (TARGET_DEBUG_BUILTIN)
18289 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18290 (long unsigned)i);
18292 continue;
18295 if (icode == CODE_FOR_nothing)
18297 if (TARGET_DEBUG_BUILTIN)
18298 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18299 d->name);
18301 continue;
18304 type = builtin_function_type (insn_data[icode].operand[0].mode,
18305 insn_data[icode].operand[1].mode,
18306 insn_data[icode].operand[2].mode,
18307 insn_data[icode].operand[3].mode,
18308 d->code, d->name);
18311 def_builtin (d->name, type, d->code);
18314 /* Add the binary operators. */
18315 d = bdesc_2arg;
18316 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18318 machine_mode mode0, mode1, mode2;
18319 tree type;
18320 HOST_WIDE_INT mask = d->mask;
18322 if ((mask & builtin_mask) != mask)
18324 if (TARGET_DEBUG_BUILTIN)
18325 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18326 continue;
18329 if (rs6000_overloaded_builtin_p (d->code))
18331 if (! (type = opaque_ftype_opaque_opaque))
18332 type = opaque_ftype_opaque_opaque
18333 = build_function_type_list (opaque_V4SI_type_node,
18334 opaque_V4SI_type_node,
18335 opaque_V4SI_type_node,
18336 NULL_TREE);
18338 else
18340 enum insn_code icode = d->icode;
18341 if (d->name == 0)
18343 if (TARGET_DEBUG_BUILTIN)
18344 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18345 (long unsigned)i);
18347 continue;
18350 if (icode == CODE_FOR_nothing)
18352 if (TARGET_DEBUG_BUILTIN)
18353 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18354 d->name);
18356 continue;
18359 mode0 = insn_data[icode].operand[0].mode;
18360 mode1 = insn_data[icode].operand[1].mode;
18361 mode2 = insn_data[icode].operand[2].mode;
18363 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18365 if (! (type = v2si_ftype_v2si_qi))
18366 type = v2si_ftype_v2si_qi
18367 = build_function_type_list (opaque_V2SI_type_node,
18368 opaque_V2SI_type_node,
18369 char_type_node,
18370 NULL_TREE);
18373 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18374 && mode2 == QImode)
18376 if (! (type = v2si_ftype_int_qi))
18377 type = v2si_ftype_int_qi
18378 = build_function_type_list (opaque_V2SI_type_node,
18379 integer_type_node,
18380 char_type_node,
18381 NULL_TREE);
18384 else
18385 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18386 d->code, d->name);
18389 def_builtin (d->name, type, d->code);
18392 /* Add the simple unary operators. */
18393 d = bdesc_1arg;
18394 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18396 machine_mode mode0, mode1;
18397 tree type;
18398 HOST_WIDE_INT mask = d->mask;
18400 if ((mask & builtin_mask) != mask)
18402 if (TARGET_DEBUG_BUILTIN)
18403 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18404 continue;
18407 if (rs6000_overloaded_builtin_p (d->code))
18409 if (! (type = opaque_ftype_opaque))
18410 type = opaque_ftype_opaque
18411 = build_function_type_list (opaque_V4SI_type_node,
18412 opaque_V4SI_type_node,
18413 NULL_TREE);
18415 else
18417 enum insn_code icode = d->icode;
18418 if (d->name == 0)
18420 if (TARGET_DEBUG_BUILTIN)
18421 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18422 (long unsigned)i);
18424 continue;
18427 if (icode == CODE_FOR_nothing)
18429 if (TARGET_DEBUG_BUILTIN)
18430 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18431 d->name);
18433 continue;
18436 mode0 = insn_data[icode].operand[0].mode;
18437 mode1 = insn_data[icode].operand[1].mode;
18439 if (mode0 == V2SImode && mode1 == QImode)
18441 if (! (type = v2si_ftype_qi))
18442 type = v2si_ftype_qi
18443 = build_function_type_list (opaque_V2SI_type_node,
18444 char_type_node,
18445 NULL_TREE);
18448 else
18449 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18450 d->code, d->name);
18453 def_builtin (d->name, type, d->code);
18456 /* Add the simple no-argument operators. */
18457 d = bdesc_0arg;
18458 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18460 machine_mode mode0;
18461 tree type;
18462 HOST_WIDE_INT mask = d->mask;
18464 if ((mask & builtin_mask) != mask)
18466 if (TARGET_DEBUG_BUILTIN)
18467 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18468 continue;
18470 if (rs6000_overloaded_builtin_p (d->code))
18472 if (!opaque_ftype_opaque)
18473 opaque_ftype_opaque
18474 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18475 type = opaque_ftype_opaque;
18477 else
18479 enum insn_code icode = d->icode;
18480 if (d->name == 0)
18482 if (TARGET_DEBUG_BUILTIN)
18483 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18484 (long unsigned) i);
18485 continue;
18487 if (icode == CODE_FOR_nothing)
18489 if (TARGET_DEBUG_BUILTIN)
18490 fprintf (stderr,
18491 "rs6000_builtin, skip no-argument %s (no code)\n",
18492 d->name);
18493 continue;
18495 mode0 = insn_data[icode].operand[0].mode;
18496 if (mode0 == V2SImode)
18498 /* Code for paired single. */
18499 if (! (type = v2si_ftype))
18501 v2si_ftype
18502 = build_function_type_list (opaque_V2SI_type_node,
18503 NULL_TREE);
18504 type = v2si_ftype;
18507 else
18508 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18509 d->code, d->name);
18511 def_builtin (d->name, type, d->code);
18515 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18516 static void
18517 init_float128_ibm (machine_mode mode)
18519 if (!TARGET_XL_COMPAT)
18521 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18522 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18523 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18524 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18526 if (!TARGET_HARD_FLOAT)
18528 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18529 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18530 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18531 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18532 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18533 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18534 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18535 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18537 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18538 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18539 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18540 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18541 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18542 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18543 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18544 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18547 else
18549 set_optab_libfunc (add_optab, mode, "_xlqadd");
18550 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18551 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18552 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18555 /* Add various conversions for IFmode to use the traditional TFmode
18556 names. */
18557 if (mode == IFmode)
18559 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18560 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18561 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18562 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18563 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18564 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18566 if (TARGET_POWERPC64)
18568 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18569 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18570 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18571 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
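/* Example (illustrative sketch, not part of the GCC sources): the effect
   of the registrations above at the user level.  With IBM extended double
   and no -mxl-compat, a long double operation without an inline expansion
   becomes a libcall:

     long double
     scale (long double x, long double y)
     {
       return x * y;              /* emitted as a call to __gcc_qmul */
     }

   Under -mxl-compat the same multiply calls _xlqmul instead, matching the
   names used by the IBM XL compilers.  */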
18576 /* Create a decl for either complex long double multiply or complex long double
18577 divide when long double is IEEE 128-bit floating point. We can't use
18578 __multc3 and __divtc3 because the original long double using IBM extended
18579 double used those names. The complex multiply/divide functions are encoded
18580 as builtin functions with a complex result and 4 scalar inputs. */
18582 static void
18583 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18585 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18586 name, NULL_TREE);
18588 set_builtin_decl (fncode, fndecl, true);
18590 if (TARGET_DEBUG_BUILTIN)
18591 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18593 return;
18596 /* Set up IEEE 128-bit floating point routines. Use different names if the
18597 arguments can be passed in a vector register. The historical PowerPC
18598 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18599 continue to use that if we aren't using vector registers to pass IEEE
18600 128-bit floating point. */
18602 static void
18603 init_float128_ieee (machine_mode mode)
18605 if (FLOAT128_VECTOR_P (mode))
18607 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. */
18608 if (mode == TFmode && TARGET_IEEEQUAD)
18610 built_in_function fncode_mul =
18611 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18612 - MIN_MODE_COMPLEX_FLOAT);
18613 built_in_function fncode_div =
18614 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18615 - MIN_MODE_COMPLEX_FLOAT);
18617 tree fntype = build_function_type_list (complex_long_double_type_node,
18618 long_double_type_node,
18619 long_double_type_node,
18620 long_double_type_node,
18621 long_double_type_node,
18622 NULL_TREE);
18624 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18625 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18628 set_optab_libfunc (add_optab, mode, "__addkf3");
18629 set_optab_libfunc (sub_optab, mode, "__subkf3");
18630 set_optab_libfunc (neg_optab, mode, "__negkf2");
18631 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18632 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18633 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18634 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18636 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18637 set_optab_libfunc (ne_optab, mode, "__nekf2");
18638 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18639 set_optab_libfunc (ge_optab, mode, "__gekf2");
18640 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18641 set_optab_libfunc (le_optab, mode, "__lekf2");
18642 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18644 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18645 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18646 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18647 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18649 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18650 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18651 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18653 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18654 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18655 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18657 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18658 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18659 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18660 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18661 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18662 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18664 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18665 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18666 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18667 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18669 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18670 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18671 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18672 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18674 if (TARGET_POWERPC64)
18676 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18677 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18678 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18679 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18683 else
18685 set_optab_libfunc (add_optab, mode, "_q_add");
18686 set_optab_libfunc (sub_optab, mode, "_q_sub");
18687 set_optab_libfunc (neg_optab, mode, "_q_neg");
18688 set_optab_libfunc (smul_optab, mode, "_q_mul");
18689 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18690 if (TARGET_PPC_GPOPT)
18691 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18693 set_optab_libfunc (eq_optab, mode, "_q_feq");
18694 set_optab_libfunc (ne_optab, mode, "_q_fne");
18695 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18696 set_optab_libfunc (ge_optab, mode, "_q_fge");
18697 set_optab_libfunc (lt_optab, mode, "_q_flt");
18698 set_optab_libfunc (le_optab, mode, "_q_fle");
18700 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18701 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18702 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18703 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18704 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18705 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18706 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18707 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
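/* Example (illustrative sketch, not part of the GCC sources): with the
   KFmode mappings above and no IEEE 128-bit hardware instructions,
   __float128 arithmetic lowers to the libgcc routines registered here:

     __float128
     sum (__float128 a, __float128 b)
     {
       return a + b;              /* emitted as a call to __addkf3 */
     }

   The _q_* names in the else branch serve the older 32-bit SVR4 quad
   format, where the value is not passed in a vector register.  */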
18711 static void
18712 rs6000_init_libfuncs (void)
18714 /* __float128 support. */
18715 if (TARGET_FLOAT128_TYPE)
18717 init_float128_ibm (IFmode);
18718 init_float128_ieee (KFmode);
18721 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18722 if (TARGET_LONG_DOUBLE_128)
18724 if (!TARGET_IEEEQUAD)
18725 init_float128_ibm (TFmode);
18727 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18728 else
18729 init_float128_ieee (TFmode);
18733 /* Emit a potentially record-form instruction, setting DST from SRC.
18734 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18735 signed comparison of DST with zero. If DOT is 1, the generated RTL
18736 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18737 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18738 a separate COMPARE. */
18740 void
18741 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18743 if (dot == 0)
18745 emit_move_insn (dst, src);
18746 return;
18749 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18751 emit_move_insn (dst, src);
18752 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18753 return;
18756 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18757 if (dot == 1)
18759 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18760 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18762 else
18764 rtx set = gen_rtx_SET (dst, src);
18765 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
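/* Added commentary (not in the original file): the RTL shapes emitted by
   the function above, written out for reference:

     dot == 0:  (set DST SRC)

     dot == 1:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                           (clobber DST)])

     dot == 2:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                           (set DST SRC)])

   When CCREG is not CR0, the fallback instead emits the plain move
   followed by a separate (set CCREG (compare:CC DST (const_int 0))).  */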
18770 /* A validation routine: say whether CODE, a condition code, and MODE
18771 match. The other alternatives either don't make sense or should
18772 never be generated. */
18774 void
18775 validate_condition_mode (enum rtx_code code, machine_mode mode)
18777 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18778 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18779 && GET_MODE_CLASS (mode) == MODE_CC);
18781 /* These don't make sense. */
18782 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18783 || mode != CCUNSmode);
18785 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18786 || mode == CCUNSmode);
18788 gcc_assert (mode == CCFPmode
18789 || (code != ORDERED && code != UNORDERED
18790 && code != UNEQ && code != LTGT
18791 && code != UNGT && code != UNLT
18792 && code != UNGE && code != UNLE));
18794 /* These should never be generated except for
18795 flag_finite_math_only. */
18796 gcc_assert (mode != CCFPmode
18797 || flag_finite_math_only
18798 || (code != LE && code != GE
18799 && code != UNEQ && code != LTGT
18800 && code != UNGT && code != UNLT));
18802 /* These are invalid; the information is not there. */
18803 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18807 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18808 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18809 non-null, store there the bit offset (counted from the right) where
18810 the single stretch of 1 bits begins; and similarly for B, the bit
18811 offset where it ends. */
18813 bool
18814 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18816 unsigned HOST_WIDE_INT val = INTVAL (mask);
18817 unsigned HOST_WIDE_INT bit;
18818 int nb, ne;
18819 int n = GET_MODE_PRECISION (mode);
18821 if (mode != DImode && mode != SImode)
18822 return false;
18824 if (INTVAL (mask) >= 0)
18826 bit = val & -val;
18827 ne = exact_log2 (bit);
18828 nb = exact_log2 (val + bit);
18830 else if (val + 1 == 0)
18832 nb = n;
18833 ne = 0;
18835 else if (val & 1)
18837 val = ~val;
18838 bit = val & -val;
18839 nb = exact_log2 (bit);
18840 ne = exact_log2 (val + bit);
18842 else
18844 bit = val & -val;
18845 ne = exact_log2 (bit);
18846 if (val + bit == 0)
18847 nb = n;
18848 else
18849 nb = 0;
18852 nb--;
18854 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18855 return false;
18857 if (b)
18858 *b = nb;
18859 if (e)
18860 *e = ne;
18862 return true;
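/* Worked example (added commentary, not in the original file): for
   MASK = 0x0ff0, one contiguous stretch of ones:

     bit = val & -val = 0x0010  ->  ne = 4          (run starts at bit 4)
     val + bit        = 0x1000  ->  nb = 12, then nb-- gives 11
                                                    (run ends at bit 11)

   so *e = 4 and *b = 11.  A value such as 0x0f0f is rejected: val + bit
   is 0x0f10, not a power of two, so exact_log2 returns -1 and the final
   range check fails.  */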
18865 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18866 or rldicr instruction, to implement an AND with it in mode MODE. */
18868 bool
18869 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18871 int nb, ne;
18873 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18874 return false;
18876 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18877 does not wrap. */
18878 if (mode == DImode)
18879 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18881 /* For SImode, rlwinm can do everything. */
18882 if (mode == SImode)
18883 return (nb < 32 && ne < 32);
18885 return false;
18888 /* Return the instruction template for an AND with mask in mode MODE, with
18889 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18891 const char *
18892 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18894 int nb, ne;
18896 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18897 gcc_unreachable ();
18899 if (mode == DImode && ne == 0)
18901 operands[3] = GEN_INT (63 - nb);
18902 if (dot)
18903 return "rldicl. %0,%1,0,%3";
18904 return "rldicl %0,%1,0,%3";
18907 if (mode == DImode && nb == 63)
18909 operands[3] = GEN_INT (63 - ne);
18910 if (dot)
18911 return "rldicr. %0,%1,0,%3";
18912 return "rldicr %0,%1,0,%3";
18915 if (nb < 32 && ne < 32)
18917 operands[3] = GEN_INT (31 - nb);
18918 operands[4] = GEN_INT (31 - ne);
18919 if (dot)
18920 return "rlwinm. %0,%1,0,%3,%4";
18921 return "rlwinm %0,%1,0,%3,%4";
18924 gcc_unreachable ();
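/* Added commentary (not in the original file): example expansions of the
   three templates above, for an AND with the given mask:

     0x00000000ffffffff  (DImode, ne == 0)   ->  rldicl %0,%1,0,32
     0xffffffff00000000  (DImode, nb == 63)  ->  rldicr %0,%1,0,31
     0x00000ff0          (SImode)            ->  rlwinm %0,%1,0,20,27

   In each case the rotate count is 0; only the mask bounds differ.  */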
18927 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18928 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18929 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18931 bool
18932 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18934 int nb, ne;
18936 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18937 return false;
18939 int n = GET_MODE_PRECISION (mode);
18940 int sh = -1;
18942 if (CONST_INT_P (XEXP (shift, 1)))
18944 sh = INTVAL (XEXP (shift, 1));
18945 if (sh < 0 || sh >= n)
18946 return false;
18949 rtx_code code = GET_CODE (shift);
18951 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18952 if (sh == 0)
18953 code = ROTATE;
18955 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18956 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18957 code = ASHIFT;
18958 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18960 code = LSHIFTRT;
18961 sh = n - sh;
18964 /* DImode rotates need rld*. */
18965 if (mode == DImode && code == ROTATE)
18966 return (nb == 63 || ne == 0 || ne == sh);
18968 /* SImode rotates need rlw*. */
18969 if (mode == SImode && code == ROTATE)
18970 return (nb < 32 && ne < 32 && sh < 32);
18972 /* Wrap-around masks are only okay for rotates. */
18973 if (ne > nb)
18974 return false;
18976 /* Variable shifts are only okay for rotates. */
18977 if (sh < 0)
18978 return false;
18980 /* Don't allow ASHIFT if the mask is wrong for that. */
18981 if (code == ASHIFT && ne < sh)
18982 return false;
18984 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18985 if the mask is wrong for that. */
18986 if (nb < 32 && ne < 32 && sh < 32
18987 && !(code == LSHIFTRT && nb >= 32 - sh))
18988 return true;
18990 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18991 if the mask is wrong for that. */
18992 if (code == LSHIFTRT)
18993 sh = 64 - sh;
18994 if (nb == 63 || ne == 0 || ne == sh)
18995 return !(code == LSHIFTRT && nb >= sh);
18997 return false;
19000 /* Return the instruction template for a shift with mask in mode MODE, with
19001 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19003 const char *
19004 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
19006 int nb, ne;
19008 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19009 gcc_unreachable ();
19011 if (mode == DImode && ne == 0)
19013 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19014 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
19015 operands[3] = GEN_INT (63 - nb);
19016 if (dot)
19017 return "rld%I2cl. %0,%1,%2,%3";
19018 return "rld%I2cl %0,%1,%2,%3";
19021 if (mode == DImode && nb == 63)
19023 operands[3] = GEN_INT (63 - ne);
19024 if (dot)
19025 return "rld%I2cr. %0,%1,%2,%3";
19026 return "rld%I2cr %0,%1,%2,%3";
19029 if (mode == DImode
19030 && GET_CODE (operands[4]) != LSHIFTRT
19031 && CONST_INT_P (operands[2])
19032 && ne == INTVAL (operands[2]))
19034 operands[3] = GEN_INT (63 - nb);
19035 if (dot)
19036 return "rld%I2c. %0,%1,%2,%3";
19037 return "rld%I2c %0,%1,%2,%3";
19040 if (nb < 32 && ne < 32)
19042 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19043 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19044 operands[3] = GEN_INT (31 - nb);
19045 operands[4] = GEN_INT (31 - ne);
19046 /* This insn can also be a 64-bit rotate with mask that really makes
19047 it just a shift right (with mask); the %h below are to adjust for
19048 that situation (shift count is >= 32 in that case). */
19049 if (dot)
19050 return "rlw%I2nm. %0,%1,%h2,%3,%4";
19051 return "rlw%I2nm %0,%1,%h2,%3,%4";
19054 gcc_unreachable ();
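/* Added commentary (not in the original file): two sample expansions of
   the shift-plus-mask templates above:

     DImode (x >> 16) & 0xffff   ->  rldicl %0,%1,48,48
       (the LSHIFTRT count 16 is rewritten as a left rotate by 64 - 16)

     SImode (x << 8) & 0xffff00  ->  rlwinm %0,%1,8,8,23

   Each collapses a shift and an AND into one rotate-and-mask insn.  */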
19057 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
19058 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
19059 ASHIFT, or LSHIFTRT) in mode MODE. */
19061 bool
19062 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
19064 int nb, ne;
19066 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
19067 return false;
19069 int n = GET_MODE_PRECISION (mode);
19071 int sh = INTVAL (XEXP (shift, 1));
19072 if (sh < 0 || sh >= n)
19073 return false;
19075 rtx_code code = GET_CODE (shift);
19077 /* Convert any shift by 0 to a rotate, to simplify the code below. */
19078 if (sh == 0)
19079 code = ROTATE;
19081 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19082 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
19083 code = ASHIFT;
19084 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
19086 code = LSHIFTRT;
19087 sh = n - sh;
19090 /* DImode rotates need rldimi. */
19091 if (mode == DImode && code == ROTATE)
19092 return (ne == sh);
19094 /* SImode rotates need rlwimi. */
19095 if (mode == SImode && code == ROTATE)
19096 return (nb < 32 && ne < 32 && sh < 32);
19098 /* Wrap-around masks are only okay for rotates. */
19099 if (ne > nb)
19100 return false;
19102 /* Don't allow ASHIFT if the mask is wrong for that. */
19103 if (code == ASHIFT && ne < sh)
19104 return false;
19106 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
19107 if the mask is wrong for that. */
19108 if (nb < 32 && ne < 32 && sh < 32
19109 && !(code == LSHIFTRT && nb >= 32 - sh))
19110 return true;
19112 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19113 if the mask is wrong for that. */
19114 if (code == LSHIFTRT)
19115 sh = 64 - sh;
19116 if (ne == sh)
19117 return !(code == LSHIFTRT && nb >= sh);
19119 return false;
19122 /* Return the instruction template for an insert with mask in mode MODE, with
19123 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19125 const char *
19126 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
19128 int nb, ne;
19130 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19131 gcc_unreachable ();
19133 /* Prefer rldimi because rlwimi is cracked. */
19134 if (TARGET_POWERPC64
19135 && (!dot || mode == DImode)
19136 && GET_CODE (operands[4]) != LSHIFTRT
19137 && ne == INTVAL (operands[2]))
19139 operands[3] = GEN_INT (63 - nb);
19140 if (dot)
19141 return "rldimi. %0,%1,%2,%3";
19142 return "rldimi %0,%1,%2,%3";
19145 if (nb < 32 && ne < 32)
19147 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19148 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19149 operands[3] = GEN_INT (31 - nb);
19150 operands[4] = GEN_INT (31 - ne);
19151 if (dot)
19152 return "rlwimi. %0,%1,%2,%3,%4";
19153 return "rlwimi %0,%1,%2,%3,%4";
19156 gcc_unreachable ();
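/* Added commentary (not in the original file): a sample expansion of the
   insert templates above.  Merging the low byte of %1 into bits 8..15 of
   %0, i.e. (%0 & ~0xff00) | ((%1 << 8) & 0xff00), has mask 0xff00
   (nb = 15, ne = 8) and shift count 8, and selects

     rldimi %0,%1,8,48    (preferred on 64-bit, since rlwimi is cracked)
     rlwimi %0,%1,8,16,23 (otherwise)
*/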
19159 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19160 using two machine instructions. */
19162 bool
19163 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19165 /* There are two kinds of AND we can handle with two insns:
19166 1) those we can do with two rl* insns;
19167 2) ori[s];xori[s].
19169 We do not handle that last case yet. */
19171 /* If there is just one stretch of ones, we can do it. */
19172 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19173 return true;
19175 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19176 one insn, we can do the whole thing with two. */
19177 unsigned HOST_WIDE_INT val = INTVAL (c);
19178 unsigned HOST_WIDE_INT bit1 = val & -val;
19179 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19180 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19181 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19182 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
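/* Worked example (added commentary, not in the original file): the
   hole-filling arithmetic above, for C = 0xff0ff (two runs of ones with
   a hole at bits 8..11):

     bit1 = val & -val          = 0x00001   (lowest set bit)
     bit2 = (val + bit1) & ~val = 0x00100   (lowest bit of the hole)
     val1 = (val + bit1) & val  = 0xff000   (the upper run alone)
     bit3 = val1 & -val1        = 0x01000   (first bit above the hole)

   val + bit3 - bit2 = 0xfffff, one contiguous stretch of ones, so this
   AND is doable as two rl* insns.  */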
19185 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19186 If EXPAND is true, split rotate-and-mask instructions we generate to
19187 their constituent parts as well (this is used during expand); if DOT
19188 is 1, make the last insn a record-form instruction clobbering the
19189 destination GPR and setting the CC reg (from operands[3]); if 2, set
19190 that GPR as well as the CC reg. */
19192 void
19193 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19195 gcc_assert (!(expand && dot));
19197 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19199 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19200 shift right. This generates better code than doing the masks without
19201 shifts, or shifting first right and then left. */
19202 int nb, ne;
19203 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19205 gcc_assert (mode == DImode);
19207 int shift = 63 - nb;
19208 if (expand)
19210 rtx tmp1 = gen_reg_rtx (DImode);
19211 rtx tmp2 = gen_reg_rtx (DImode);
19212 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19213 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19214 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19216 else
19218 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19219 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19220 emit_move_insn (operands[0], tmp);
19221 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19222 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19224 return;
19227 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19228 that does the rest. */
19229 unsigned HOST_WIDE_INT bit1 = val & -val;
19230 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19231 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19232 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19234 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19235 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19237 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19239 /* Two "no-rotate"-and-mask instructions, for SImode. */
19240 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19242 gcc_assert (mode == SImode);
19244 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19245 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19246 emit_move_insn (reg, tmp);
19247 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19248 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19249 return;
19252 gcc_assert (mode == DImode);
19254 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19255 insns; we have to do the first in SImode, because it wraps. */
19256 if (mask2 <= 0xffffffff
19257 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19259 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19260 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19261 GEN_INT (mask1));
19262 rtx reg_low = gen_lowpart (SImode, reg);
19263 emit_move_insn (reg_low, tmp);
19264 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19265 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19266 return;
19269 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19270 at the top end), rotate back and clear the other hole. */
19271 int right = exact_log2 (bit3);
19272 int left = 64 - right;
19274 /* Rotate the mask too. */
19275 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19277 if (expand)
19279 rtx tmp1 = gen_reg_rtx (DImode);
19280 rtx tmp2 = gen_reg_rtx (DImode);
19281 rtx tmp3 = gen_reg_rtx (DImode);
19282 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19283 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19284 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19285 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19287 else
19289 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19290 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19291 emit_move_insn (operands[0], tmp);
19292 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19293 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19294 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19298 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
19299 for lfq and stfq insns iff the registers are hard registers. */
19301 int
19302 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19304 /* We might have been passed a SUBREG. */
19305 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19306 return 0;
19308 /* We might have been passed non floating point registers. */
19309 if (!FP_REGNO_P (REGNO (reg1))
19310 || !FP_REGNO_P (REGNO (reg2)))
19311 return 0;
19313 return (REGNO (reg1) == REGNO (reg2) - 1);
19316 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insns.
19317 addr1 and addr2 must be in consecutive memory locations
19318 (addr2 == addr1 + 8). */
19320 int
19321 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19323 rtx addr1, addr2;
19324 unsigned int reg1, reg2;
19325 int offset1, offset2;
19327 /* The mems cannot be volatile. */
19328 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19329 return 0;
19331 addr1 = XEXP (mem1, 0);
19332 addr2 = XEXP (mem2, 0);
19334 /* Extract an offset (if used) from the first addr. */
19335 if (GET_CODE (addr1) == PLUS)
19337 /* If not a REG, return zero. */
19338 if (GET_CODE (XEXP (addr1, 0)) != REG)
19339 return 0;
19340 else
19342 reg1 = REGNO (XEXP (addr1, 0));
19343 /* The offset must be constant! */
19344 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19345 return 0;
19346 offset1 = INTVAL (XEXP (addr1, 1));
19349 else if (GET_CODE (addr1) != REG)
19350 return 0;
19351 else
19353 reg1 = REGNO (addr1);
19354 /* This was a simple (mem (reg)) expression. Offset is 0. */
19355 offset1 = 0;
19358 /* And now for the second addr. */
19359 if (GET_CODE (addr2) == PLUS)
19361 /* If not a REG, return zero. */
19362 if (GET_CODE (XEXP (addr2, 0)) != REG)
19363 return 0;
19364 else
19366 reg2 = REGNO (XEXP (addr2, 0));
19367 /* The offset must be constant. */
19368 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19369 return 0;
19370 offset2 = INTVAL (XEXP (addr2, 1));
19373 else if (GET_CODE (addr2) != REG)
19374 return 0;
19375 else
19377 reg2 = REGNO (addr2);
19378 /* This was a simple (mem (reg)) expression. Offset is 0. */
19379 offset2 = 0;
19382 /* Both of these must have the same base register. */
19383 if (reg1 != reg2)
19384 return 0;
19386 /* The offset for the second addr must be 8 more than the first addr. */
19387 if (offset2 != offset1 + 8)
19388 return 0;
19390 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19391 instructions. */
19392 return 1;
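/* Added commentary (not in the original file): a sketch of what the two
   predicates above validate.  Fusing

     lfd 10,0(3)
     lfd 11,8(3)

   into a single lfq 10,0(3) is only sound when the FP registers are
   consecutive (reg2 == reg1 + 1) and the addresses are adjacent
   doublewords off the same base register (offset2 == offset1 + 8),
   which is exactly what these two routines check.  */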
19395 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19396 need to use DDmode; in all other cases we can use the same mode. */
19397 static machine_mode
19398 rs6000_secondary_memory_needed_mode (machine_mode mode)
19400 if (lra_in_progress && mode == SDmode)
19401 return DDmode;
19402 return mode;
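/* A minimal sketch of why SDmode spills go through DDmode (assuming the
   usual representation of SDmode values inside a 64-bit FPR): the
   4-byte binary loads/stores would reformat the bits, so LRA accesses
   the spill slot with full 8-byte moves instead:

       stfd 1,40(1)    # spill the whole FPR image
       lfd  1,40(1)    # reload it unchanged
*/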
19405 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19406 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19407 only work on the traditional altivec registers, note if an altivec register
19408 was chosen. */
19410 static enum rs6000_reg_type
19411 register_to_reg_type (rtx reg, bool *is_altivec)
19413 HOST_WIDE_INT regno;
19414 enum reg_class rclass;
19416 if (GET_CODE (reg) == SUBREG)
19417 reg = SUBREG_REG (reg);
19419 if (!REG_P (reg))
19420 return NO_REG_TYPE;
19422 regno = REGNO (reg);
19423 if (regno >= FIRST_PSEUDO_REGISTER)
19425 if (!lra_in_progress && !reload_completed)
19426 return PSEUDO_REG_TYPE;
19428 regno = true_regnum (reg);
19429 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19430 return PSEUDO_REG_TYPE;
19433 gcc_assert (regno >= 0);
19435 if (is_altivec && ALTIVEC_REGNO_P (regno))
19436 *is_altivec = true;
19438 rclass = rs6000_regno_regclass[regno];
19439 return reg_class_to_reg_type[(int)rclass];
19442 /* Helper function to return the cost of adding a TOC entry address. */
19444 static inline int
19445 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19447 int ret;
19449 if (TARGET_CMODEL != CMODEL_SMALL)
19450 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19452 else
19453 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19455 return ret;
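/* For instance, with -mcmodel=medium a TOC-referenced address is
   materialized with a two-instruction sequence such as

       addis 9,2,sym@toc@ha
       addi  9,9,sym@toc@l

   while -mcmodel=small combined with -mminimal-toc must go through the
   TOC indirectly, which is what the cost of 6 above accounts for.  */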
19458 /* Helper function for rs6000_secondary_reload to determine whether the memory
19459 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19460 needs reloading. Return negative if the memory is not handled by the memory
19461 helper functions (so the caller should try a different reload method),
19462 0 if no additional instructions are needed, and positive to give the extra cost for the
19463 memory. */
19465 static int
19466 rs6000_secondary_reload_memory (rtx addr,
19467 enum reg_class rclass,
19468 machine_mode mode)
19470 int extra_cost = 0;
19471 rtx reg, and_arg, plus_arg0, plus_arg1;
19472 addr_mask_type addr_mask;
19473 const char *type = NULL;
19474 const char *fail_msg = NULL;
19476 if (GPR_REG_CLASS_P (rclass))
19477 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19479 else if (rclass == FLOAT_REGS)
19480 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19482 else if (rclass == ALTIVEC_REGS)
19483 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19485 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19486 else if (rclass == VSX_REGS)
19487 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19488 & ~RELOAD_REG_AND_M16);
19490 /* If the register allocator hasn't made up its mind yet on the register
19491 class to use, fall back to reasonable defaults. */
19492 else if (rclass == NO_REGS)
19494 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19495 & ~RELOAD_REG_AND_M16);
19497 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19498 addr_mask &= ~(RELOAD_REG_INDEXED
19499 | RELOAD_REG_PRE_INCDEC
19500 | RELOAD_REG_PRE_MODIFY);
19503 else
19504 addr_mask = 0;
19506 /* If the register isn't valid in this register class, just return now. */
19507 if ((addr_mask & RELOAD_REG_VALID) == 0)
19509 if (TARGET_DEBUG_ADDR)
19511 fprintf (stderr,
19512 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19513 "not valid in class\n",
19514 GET_MODE_NAME (mode), reg_class_names[rclass]);
19515 debug_rtx (addr);
19518 return -1;
19521 switch (GET_CODE (addr))
19523 /* Does the register class support auto-update forms for this mode? We
19524 don't need a scratch register, since the powerpc only supports
19525 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19526 case PRE_INC:
19527 case PRE_DEC:
19528 reg = XEXP (addr, 0);
19529 if (!base_reg_operand (reg, GET_MODE (reg)))
19531 fail_msg = "no base register #1";
19532 extra_cost = -1;
19535 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19537 extra_cost = 1;
19538 type = "update";
19540 break;
19542 case PRE_MODIFY:
19543 reg = XEXP (addr, 0);
19544 plus_arg1 = XEXP (addr, 1);
19545 if (!base_reg_operand (reg, GET_MODE (reg))
19546 || GET_CODE (plus_arg1) != PLUS
19547 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19549 fail_msg = "bad PRE_MODIFY";
19550 extra_cost = -1;
19553 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19555 extra_cost = 1;
19556 type = "update";
19558 break;
19560 /* Do we need to simulate AND -16 to clear the bottom address bits used
19561 in VMX load/stores? Only allow the AND for vector sizes. */
19562 case AND:
19563 and_arg = XEXP (addr, 0);
19564 if (GET_MODE_SIZE (mode) != 16
19565 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19566 || INTVAL (XEXP (addr, 1)) != -16)
19568 fail_msg = "bad Altivec AND #1";
19569 extra_cost = -1;
19572 if (rclass != ALTIVEC_REGS)
19574 if (legitimate_indirect_address_p (and_arg, false))
19575 extra_cost = 1;
19577 else if (legitimate_indexed_address_p (and_arg, false))
19578 extra_cost = 2;
19580 else
19582 fail_msg = "bad Altivec AND #2";
19583 extra_cost = -1;
19586 type = "and";
19588 break;
19590 /* If this is an indirect address, make sure it is a base register. */
19591 case REG:
19592 case SUBREG:
19593 if (!legitimate_indirect_address_p (addr, false))
19595 extra_cost = 1;
19596 type = "move";
19598 break;
19600 /* If this is an indexed address, make sure the register class can handle
19601 indexed addresses for this mode. */
19602 case PLUS:
19603 plus_arg0 = XEXP (addr, 0);
19604 plus_arg1 = XEXP (addr, 1);
19606 /* (plus (plus (reg) (constant)) (constant)) is generated during
19607 push_reload processing, so handle it now. */
19608 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19610 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19612 extra_cost = 1;
19613 type = "offset";
19617 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19618 push_reload processing, so handle it now. */
19619 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19621 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19623 extra_cost = 1;
19624 type = "indexed #2";
19628 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19630 fail_msg = "no base register #2";
19631 extra_cost = -1;
19634 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19636 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19637 || !legitimate_indexed_address_p (addr, false))
19639 extra_cost = 1;
19640 type = "indexed";
19644 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19645 && CONST_INT_P (plus_arg1))
19647 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19649 extra_cost = 1;
19650 type = "vector d-form offset";
19654 /* Make sure the register class can handle offset addresses. */
19655 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19657 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19659 extra_cost = 1;
19660 type = "offset #2";
19664 else
19666 fail_msg = "bad PLUS";
19667 extra_cost = -1;
19670 break;
19672 case LO_SUM:
19673 /* Quad offsets are restricted and can't handle normal addresses. */
19674 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19676 extra_cost = -1;
19677 type = "vector d-form lo_sum";
19680 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19682 fail_msg = "bad LO_SUM";
19683 extra_cost = -1;
19686 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19688 extra_cost = 1;
19689 type = "lo_sum";
19691 break;
19693 /* Static addresses need to create a TOC entry. */
19694 case CONST:
19695 case SYMBOL_REF:
19696 case LABEL_REF:
19697 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19699 extra_cost = -1;
19700 type = "vector d-form lo_sum #2";
19703 else
19705 type = "address";
19706 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19708 break;
19710 /* TOC references look like offsettable memory. */
19711 case UNSPEC:
19712 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19714 fail_msg = "bad UNSPEC";
19715 extra_cost = -1;
19718 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19720 extra_cost = -1;
19721 type = "vector d-form lo_sum #3";
19724 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19726 extra_cost = 1;
19727 type = "toc reference";
19729 break;
19731 default:
19733 fail_msg = "bad address";
19734 extra_cost = -1;
19738 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19740 if (extra_cost < 0)
19741 fprintf (stderr,
19742 "rs6000_secondary_reload_memory error: mode = %s, "
19743 "class = %s, addr_mask = '%s', %s\n",
19744 GET_MODE_NAME (mode),
19745 reg_class_names[rclass],
19746 rs6000_debug_addr_mask (addr_mask, false),
19747 (fail_msg != NULL) ? fail_msg : "<bad address>");
19749 else
19750 fprintf (stderr,
19751 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19752 "addr_mask = '%s', extra cost = %d, %s\n",
19753 GET_MODE_NAME (mode),
19754 reg_class_names[rclass],
19755 rs6000_debug_addr_mask (addr_mask, false),
19756 extra_cost,
19757 (type) ? type : "<none>");
19759 debug_rtx (addr);
19762 return extra_cost;
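/* A minimal sketch of the cost-2 Altivec AND case above: for the
   address (and (plus r3 r4) (const_int -16)) in a non-Altivec class,
   reload must materialize the sum and then simulate the masking that
   lvx/stvx would have done in hardware:

       add    9,3,4          # materialize the indexed address
       rldicr 9,9,0,59       # clear the low 4 bits (the AND -16)

   before the actual load or store uses r9.  */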
19765 /* Helper function for rs6000_secondary_reload to return true if a move to a
19766 different register class is really a simple move. */
19768 static bool
19769 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19770 enum rs6000_reg_type from_type,
19771 machine_mode mode)
19773 int size = GET_MODE_SIZE (mode);
19775 /* Add support for various direct moves available. In this function, we only
19776 look at cases where we don't need any extra registers, and one or more
19777 simple move insns are issued. Historically, small integers were not
19778 allowed in FPR/VSX registers. Single-precision binary floating point
19779 is not a simple move because we must convert to/from the
19780 single-precision memory layout. The 4-byte SDmode can be moved.
19781 TDmode values are disallowed since they need special direct move handling, which we do not support yet. */
19782 if (TARGET_DIRECT_MOVE
19783 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19784 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19786 if (TARGET_POWERPC64)
19788 /* ISA 2.07: MTVSRD or MFVSRD. */
19789 if (size == 8)
19790 return true;
19792 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19793 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19794 return true;
19797 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19798 if (TARGET_P8_VECTOR)
19800 if (mode == SImode)
19801 return true;
19803 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19804 return true;
19807 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19808 if (mode == SDmode)
19809 return true;
19812 /* Power6+: MFTGPR or MFFGPR. */
19813 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19814 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19815 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19816 return true;
19818 /* Move to/from SPR. */
19819 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19820 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19821 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19822 return true;
19824 return false;
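/* E.g. a DImode move from a GPR to a VSX register on a 64-bit ISA 2.07
   target is "simple", being the single instruction

       mtvsrd 0,3

   whereas SFmode is never a simple GPR/VSX move, because the value must
   also be converted to or from the scalar single-precision layout.  */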
19827 /* Direct move helper function for rs6000_secondary_reload. Handle all of
19828 the special direct moves that involve allocating an extra register.
19829 Return true if there is a helper function for the move, filling in SRI
19830 with its insn code and extra cost, and false if not. */
19832 static bool
19833 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19834 enum rs6000_reg_type from_type,
19835 machine_mode mode,
19836 secondary_reload_info *sri,
19837 bool altivec_p)
19839 bool ret = false;
19840 enum insn_code icode = CODE_FOR_nothing;
19841 int cost = 0;
19842 int size = GET_MODE_SIZE (mode);
19844 if (TARGET_POWERPC64 && size == 16)
19846 /* Handle moving 128-bit values from GPRs to VSX registers on
19847 ISA 2.07 (power8, power9) when running in 64-bit mode using
19848 XXPERMDI to glue the two 64-bit values back together. */
19849 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19851 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19852 icode = reg_addr[mode].reload_vsx_gpr;
19855 /* Handle moving 128-bit values from VSX registers to GPRs on
19856 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19857 bottom 64-bit value. */
19858 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19860 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19861 icode = reg_addr[mode].reload_gpr_vsx;
19865 else if (TARGET_POWERPC64 && mode == SFmode)
19867 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19869 cost = 3; /* xscvdpspn, mfvsrd, and. */
19870 icode = reg_addr[mode].reload_gpr_vsx;
19873 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19875 cost = 2; /* mtvsrwz, xscvspdpn. */
19876 icode = reg_addr[mode].reload_vsx_gpr;
19880 else if (!TARGET_POWERPC64 && size == 8)
19882 /* Handle moving 64-bit values from GPRs to floating point registers on
19883 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19884 32-bit values back together. Altivec register classes must be handled
19885 specially since a different instruction is used, and the secondary
19886 reload support requires a single instruction class in the scratch
19887 register constraint. However, right now TFmode is not allowed in
19888 Altivec registers, so the pattern will never match. */
19889 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19891 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19892 icode = reg_addr[mode].reload_fpr_gpr;
19896 if (icode != CODE_FOR_nothing)
19898 ret = true;
19899 if (sri)
19901 sri->icode = icode;
19902 sri->extra_cost = cost;
19906 return ret;
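/* The cost-3 GPR-to-VSX case above corresponds to a sequence such as
   (illustrative, big-endian):

       mtvsrd   vs0,r4        # high doubleword
       mtvsrd   vs1,r5        # low doubleword, via the extra register
       xxpermdi vs0,vs0,vs1,0 # glue the two halves back together
*/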
19909 /* Return whether a move between two register classes can be done either
19910 directly (simple move) or via a pattern that uses a single extra temporary
19911 (using ISA 2.07's direct move in this case). */
19913 static bool
19914 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19915 enum rs6000_reg_type from_type,
19916 machine_mode mode,
19917 secondary_reload_info *sri,
19918 bool altivec_p)
19920 /* Fall back to load/store reloads if either type is not a register. */
19921 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19922 return false;
19924 /* If we haven't allocated registers yet, assume the move can be done for the
19925 standard register types. */
19926 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19927 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19928 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19929 return true;
19931 /* A move within the same set of registers is a simple move for non-specialized
19932 registers. */
19933 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19934 return true;
19936 /* Check whether a simple move can be done directly. */
19937 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19939 if (sri)
19941 sri->icode = CODE_FOR_nothing;
19942 sri->extra_cost = 0;
19944 return true;
19947 /* Now check if we can do it in a few steps. */
19948 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19949 altivec_p);
19952 /* Inform reload about cases where moving X with a mode MODE to a register in
19953 RCLASS requires an extra scratch or immediate register. Return the class
19954 needed for the immediate register.
19956 For VSX and Altivec, we may need a register to convert sp+offset into
19957 reg+sp.
19959 For misaligned 64-bit gpr loads and stores we need a register to
19960 convert an offset address to indirect. */
19962 static reg_class_t
19963 rs6000_secondary_reload (bool in_p,
19964 rtx x,
19965 reg_class_t rclass_i,
19966 machine_mode mode,
19967 secondary_reload_info *sri)
19969 enum reg_class rclass = (enum reg_class) rclass_i;
19970 reg_class_t ret = ALL_REGS;
19971 enum insn_code icode;
19972 bool default_p = false;
19973 bool done_p = false;
19975 /* Allow subreg of memory before/during reload. */
19976 bool memory_p = (MEM_P (x)
19977 || (!reload_completed && GET_CODE (x) == SUBREG
19978 && MEM_P (SUBREG_REG (x))));
19980 sri->icode = CODE_FOR_nothing;
19981 sri->t_icode = CODE_FOR_nothing;
19982 sri->extra_cost = 0;
19983 icode = ((in_p)
19984 ? reg_addr[mode].reload_load
19985 : reg_addr[mode].reload_store);
19987 if (REG_P (x) || register_operand (x, mode))
19989 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19990 bool altivec_p = (rclass == ALTIVEC_REGS);
19991 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19993 if (!in_p)
19994 std::swap (to_type, from_type);
19996 /* Can we do a direct move of some sort? */
19997 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19998 altivec_p))
20000 icode = (enum insn_code)sri->icode;
20001 default_p = false;
20002 done_p = true;
20003 ret = NO_REGS;
20007 /* Make sure 0.0 is not reloaded or forced into memory. */
20008 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
20010 ret = NO_REGS;
20011 default_p = false;
20012 done_p = true;
20015 /* If this is a scalar floating point value and we want to load it into the
20016 traditional Altivec registers, move it through a traditional floating
20017 point register, unless we have D-form addressing. Also make sure that
20018 non-zero constants use a FPR. */
20019 if (!done_p && reg_addr[mode].scalar_in_vmx_p
20020 && !mode_supports_vmx_dform (mode)
20021 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20022 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
20024 ret = FLOAT_REGS;
20025 default_p = false;
20026 done_p = true;
20029 /* Handle reload of load/stores if we have reload helper functions. */
20030 if (!done_p && icode != CODE_FOR_nothing && memory_p)
20032 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
20033 mode);
20035 if (extra_cost >= 0)
20037 done_p = true;
20038 ret = NO_REGS;
20039 if (extra_cost > 0)
20041 sri->extra_cost = extra_cost;
20042 sri->icode = icode;
20047 /* Handle unaligned loads and stores of integer registers. */
20048 if (!done_p && TARGET_POWERPC64
20049 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20050 && memory_p
20051 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
20053 rtx addr = XEXP (x, 0);
20054 rtx off = address_offset (addr);
20056 if (off != NULL_RTX)
20058 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20059 unsigned HOST_WIDE_INT offset = INTVAL (off);
20061 /* We need a secondary reload when our legitimate_address_p
20062 says the address is good (as otherwise the entire address
20063 will be reloaded), and the offset is not a multiple of
20064 four or we have an address wrap. Address wrap will only
20065 occur for LO_SUMs since legitimate_offset_address_p
20066 rejects addresses for 16-byte mems that will wrap. */
20067 if (GET_CODE (addr) == LO_SUM
20068 ? (1 /* legitimate_address_p allows any offset for lo_sum */
20069 && ((offset & 3) != 0
20070 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
20071 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
20072 && (offset & 3) != 0))
20074 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
20075 if (in_p)
20076 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
20077 : CODE_FOR_reload_di_load);
20078 else
20079 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
20080 : CODE_FOR_reload_di_store);
20081 sri->extra_cost = 2;
20082 ret = NO_REGS;
20083 done_p = true;
20085 else
20086 default_p = true;
20088 else
20089 default_p = true;
20092 if (!done_p && !TARGET_POWERPC64
20093 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20094 && memory_p
20095 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
20097 rtx addr = XEXP (x, 0);
20098 rtx off = address_offset (addr);
20100 if (off != NULL_RTX)
20102 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20103 unsigned HOST_WIDE_INT offset = INTVAL (off);
20105 /* We need a secondary reload when our legitimate_address_p
20106 says the address is good (as otherwise the entire address
20107 will be reloaded), and we have a wrap.
20109 legitimate_lo_sum_address_p allows LO_SUM addresses to
20110 have any offset so test for wrap in the low 16 bits.
20112 legitimate_offset_address_p checks for the range
20113 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
20114 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
20115 [0x7ff4,0x7fff] respectively, so test for the
20116 intersection of these ranges, [0x7ffc,0x7fff] and
20117 [0x7ff4,0x7ff7] respectively.
20119 Note that the address we see here may have been
20120 manipulated by legitimize_reload_address. */
20121 if (GET_CODE (addr) == LO_SUM
20122 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
20123 : offset - (0x8000 - extra) < UNITS_PER_WORD)
20125 if (in_p)
20126 sri->icode = CODE_FOR_reload_si_load;
20127 else
20128 sri->icode = CODE_FOR_reload_si_store;
20129 sri->extra_cost = 2;
20130 ret = NO_REGS;
20131 done_p = true;
20133 else
20134 default_p = true;
20136 else
20137 default_p = true;
20140 if (!done_p)
20141 default_p = true;
20143 if (default_p)
20144 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20146 gcc_assert (ret != ALL_REGS);
20148 if (TARGET_DEBUG_ADDR)
20150 fprintf (stderr,
20151 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20152 "mode = %s",
20153 reg_class_names[ret],
20154 in_p ? "true" : "false",
20155 reg_class_names[rclass],
20156 GET_MODE_NAME (mode));
20158 if (reload_completed)
20159 fputs (", after reload", stderr);
20161 if (!done_p)
20162 fputs (", done_p not set", stderr);
20164 if (default_p)
20165 fputs (", default secondary reload", stderr);
20167 if (sri->icode != CODE_FOR_nothing)
20168 fprintf (stderr, ", reload func = %s, extra cost = %d",
20169 insn_data[sri->icode].name, sri->extra_cost);
20171 else if (sri->extra_cost > 0)
20172 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20174 fputs ("\n", stderr);
20175 debug_rtx (x);
20178 return ret;
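/* With -mdebug=addr the hook above traces every decision; the output
   has the shape (illustrative values):

   rs6000_secondary_reload, return NO_REGS, in_p = true,
   rclass = GENERAL_REGS, mode = DI, reload func = reload_di_load,
   extra cost = 2
*/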
20181 /* Better tracing for rs6000_secondary_reload_inner. */
20183 static void
20184 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20185 bool store_p)
20187 rtx set, clobber;
20189 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20191 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20192 store_p ? "store" : "load");
20194 if (store_p)
20195 set = gen_rtx_SET (mem, reg);
20196 else
20197 set = gen_rtx_SET (reg, mem);
20199 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20200 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20203 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20204 ATTRIBUTE_NORETURN;
20206 static void
20207 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20208 bool store_p)
20210 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20211 gcc_unreachable ();
20214 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20215 reload helper functions. These were identified in
20216 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20217 reload, it calls the insns:
20218 reload_<RELOAD:mode>_<P:mptrsize>_store
20219 reload_<RELOAD:mode>_<P:mptrsize>_load
20221 which in turn calls this function, to do whatever is necessary to create
20222 valid addresses. */
20224 void
20225 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20227 int regno = true_regnum (reg);
20228 machine_mode mode = GET_MODE (reg);
20229 addr_mask_type addr_mask;
20230 rtx addr;
20231 rtx new_addr;
20232 rtx op_reg, op0, op1;
20233 rtx and_op;
20234 rtx cc_clobber;
20235 rtvec rv;
20237 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20238 || !base_reg_operand (scratch, GET_MODE (scratch)))
20239 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20241 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20242 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20244 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20245 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20247 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20248 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20250 else
20251 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20253 /* Make sure the mode is valid in this register class. */
20254 if ((addr_mask & RELOAD_REG_VALID) == 0)
20255 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20257 if (TARGET_DEBUG_ADDR)
20258 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20260 new_addr = addr = XEXP (mem, 0);
20261 switch (GET_CODE (addr))
20263 /* Does the register class support auto-update forms for this mode? If
20264 not, do the update now. We don't need a scratch register, since the
20265 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20266 case PRE_INC:
20267 case PRE_DEC:
20268 op_reg = XEXP (addr, 0);
20269 if (!base_reg_operand (op_reg, Pmode))
20270 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20272 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20274 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20275 new_addr = op_reg;
20277 break;
20279 case PRE_MODIFY:
20280 op0 = XEXP (addr, 0);
20281 op1 = XEXP (addr, 1);
20282 if (!base_reg_operand (op0, Pmode)
20283 || GET_CODE (op1) != PLUS
20284 || !rtx_equal_p (op0, XEXP (op1, 0)))
20285 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20287 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20289 emit_insn (gen_rtx_SET (op0, op1));
20290 new_addr = reg;
20292 break;
20294 /* Do we need to simulate AND -16 to clear the bottom address bits used
20295 in VMX load/stores? */
20296 case AND:
20297 op0 = XEXP (addr, 0);
20298 op1 = XEXP (addr, 1);
20299 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20301 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20302 op_reg = op0;
20304 else if (GET_CODE (op1) == PLUS)
20306 emit_insn (gen_rtx_SET (scratch, op1));
20307 op_reg = scratch;
20310 else
20311 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20313 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20314 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20315 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20316 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20317 new_addr = scratch;
20319 break;
20321 /* If this is an indirect address, make sure it is a base register. */
20322 case REG:
20323 case SUBREG:
20324 if (!base_reg_operand (addr, GET_MODE (addr)))
20326 emit_insn (gen_rtx_SET (scratch, addr));
20327 new_addr = scratch;
20329 break;
20331 /* If this is an indexed address, make sure the register class can handle
20332 indexed addresses for this mode. */
20333 case PLUS:
20334 op0 = XEXP (addr, 0);
20335 op1 = XEXP (addr, 1);
20336 if (!base_reg_operand (op0, Pmode))
20337 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20339 else if (int_reg_operand (op1, Pmode))
20341 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20343 emit_insn (gen_rtx_SET (scratch, addr));
20344 new_addr = scratch;
20348 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20350 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20351 || !quad_address_p (addr, mode, false))
20353 emit_insn (gen_rtx_SET (scratch, addr));
20354 new_addr = scratch;
20358 /* Make sure the register class can handle offset addresses. */
20359 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20361 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20363 emit_insn (gen_rtx_SET (scratch, addr));
20364 new_addr = scratch;
20368 else
20369 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20371 break;
20373 case LO_SUM:
20374 op0 = XEXP (addr, 0);
20375 op1 = XEXP (addr, 1);
20376 if (!base_reg_operand (op0, Pmode))
20377 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20379 else if (int_reg_operand (op1, Pmode))
20381 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20383 emit_insn (gen_rtx_SET (scratch, addr));
20384 new_addr = scratch;
20388 /* Quad offsets are restricted and can't handle normal addresses. */
20389 else if (mode_supports_vsx_dform_quad (mode))
20391 emit_insn (gen_rtx_SET (scratch, addr));
20392 new_addr = scratch;
20395 /* Make sure the register class can handle offset addresses. */
20396 else if (legitimate_lo_sum_address_p (mode, addr, false))
20398 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20400 emit_insn (gen_rtx_SET (scratch, addr));
20401 new_addr = scratch;
20405 else
20406 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20408 break;
20410 case SYMBOL_REF:
20411 case CONST:
20412 case LABEL_REF:
20413 rs6000_emit_move (scratch, addr, Pmode);
20414 new_addr = scratch;
20415 break;
20417 default:
20418 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20421 /* Adjust the address if it changed. */
20422 if (addr != new_addr)
20424 mem = replace_equiv_address_nv (mem, new_addr);
20425 if (TARGET_DEBUG_ADDR)
20426 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20429 /* Now create the move. */
20430 if (store_p)
20431 emit_insn (gen_rtx_SET (mem, reg));
20432 else
20433 emit_insn (gen_rtx_SET (reg, mem));
20435 return;
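/* A minimal sketch of the PLUS fixup above: a TImode reload from
   (mem (plus r9 (const_int 6))) cannot use the DQ-form lxv directly,
   since the offset is not a multiple of 16, so the address is first
   moved into the scratch register:

       addi 11,9,6
       lxv  0,0(11)
*/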
20438 /* Convert reloads involving 64-bit gprs and misaligned offset
20439 addressing, or multiple 32-bit gprs and offsets that are too large,
20440 to use indirect addressing. */
20442 void
20443 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20445 int regno = true_regnum (reg);
20446 enum reg_class rclass;
20447 rtx addr;
20448 rtx scratch_or_premodify = scratch;
20450 if (TARGET_DEBUG_ADDR)
20452 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20453 store_p ? "store" : "load");
20454 fprintf (stderr, "reg:\n");
20455 debug_rtx (reg);
20456 fprintf (stderr, "mem:\n");
20457 debug_rtx (mem);
20458 fprintf (stderr, "scratch:\n");
20459 debug_rtx (scratch);
20462 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20463 gcc_assert (GET_CODE (mem) == MEM);
20464 rclass = REGNO_REG_CLASS (regno);
20465 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20466 addr = XEXP (mem, 0);
20468 if (GET_CODE (addr) == PRE_MODIFY)
20470 gcc_assert (REG_P (XEXP (addr, 0))
20471 && GET_CODE (XEXP (addr, 1)) == PLUS
20472 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20473 scratch_or_premodify = XEXP (addr, 0);
20474 if (!HARD_REGISTER_P (scratch_or_premodify))
20475 /* If we have a pseudo here then reload will have arranged
20476 to have it replaced, but only in the original insn.
20477 Use the replacement here too. */
20478 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20480 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20481 expressions from the original insn, without unsharing them.
20482 Any RTL that points into the original insn will of course
20483 have register replacements applied. That is why we don't
20484 need to look for replacements under the PLUS. */
20485 addr = XEXP (addr, 1);
20487 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20489 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20491 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20493 /* Now create the move. */
20494 if (store_p)
20495 emit_insn (gen_rtx_SET (mem, reg));
20496 else
20497 emit_insn (gen_rtx_SET (reg, mem));
20499 return;
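/* For instance, a 64-bit gpr load from (mem:DI (plus r3 (const_int 6)))
   cannot use ld, whose DS-form offset must be a multiple of 4, so this
   routine rewrites the access to be indirect:

       addi 11,3,6        # scratch <- full address
       ld   4,0(11)
*/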
20502 /* Given an rtx X being reloaded into a reg required to be
20503 in class CLASS, return the class of reg to actually use.
20504 In general this is just CLASS; but on some machines
20505 in some cases it is preferable to use a more restrictive class.
20507 On the RS/6000, we have to return NO_REGS when we want to reload a
20508 floating-point CONST_DOUBLE to force it to be copied to memory.
20510 We also don't want to reload integer values into floating-point
20511 registers if we can at all help it. In fact, this can
20512 cause reload to die, if it tries to generate a reload of CTR
20513 into a FP register and discovers it doesn't have the memory location
20514 required.
20516 ??? Would it be a good idea to have reload do the converse, that is
20517 try to reload floating modes into FP registers if possible?
20520 static enum reg_class
20521 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20523 machine_mode mode = GET_MODE (x);
20524 bool is_constant = CONSTANT_P (x);
20526 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20527 reload class for it. */
20528 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20529 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20530 return NO_REGS;
20532 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20533 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20534 return NO_REGS;
20536 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20537 the reloading of address expressions using PLUS into floating point
20538 registers. */
20539 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20541 if (is_constant)
20543 /* Zero is always allowed in all VSX registers. */
20544 if (x == CONST0_RTX (mode))
20545 return rclass;
20547 /* If this is a vector constant that can be formed with a few Altivec
20548 instructions, we want altivec registers. */
20549 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20550 return ALTIVEC_REGS;
20552 /* If this is an integer constant that can easily be loaded into
20553 vector registers, allow it. */
20554 if (CONST_INT_P (x))
20556 HOST_WIDE_INT value = INTVAL (x);
20558 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20559 2.06 can generate it in the Altivec registers with
20560 VSPLTI<x>. */
20561 if (value == -1)
20563 if (TARGET_P8_VECTOR)
20564 return rclass;
20565 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20566 return ALTIVEC_REGS;
20567 else
20568 return NO_REGS;
20571 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20572 a sign extend in the Altivec registers. */
20573 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20574 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20575 return ALTIVEC_REGS;
20578 /* Force constant to memory. */
20579 return NO_REGS;
20582 /* D-form addressing can easily reload the value. */
20583 if (mode_supports_vmx_dform (mode)
20584 || mode_supports_vsx_dform_quad (mode))
20585 return rclass;
20587 /* If this is a scalar floating point value and we don't have D-form
20588 addressing, prefer the traditional floating point registers so that we
20589 can use D-form (register+offset) addressing. */
20590 if (rclass == VSX_REGS
20591 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20592 return FLOAT_REGS;
20594 /* Prefer the Altivec registers if Altivec is handling the vector
20595 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20596 loads. */
20597 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20598 || mode == V1TImode)
20599 return ALTIVEC_REGS;
20601 return rclass;
20604 if (is_constant || GET_CODE (x) == PLUS)
20606 if (reg_class_subset_p (GENERAL_REGS, rclass))
20607 return GENERAL_REGS;
20608 if (reg_class_subset_p (BASE_REGS, rclass))
20609 return BASE_REGS;
20610 return NO_REGS;
20613 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20614 return GENERAL_REGS;
20616 return rclass;
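/* Examples of the constant handling above: asking for VSX_REGS with
   (const_int -1) on a power8 target returns VSX_REGS, since XXLORC can
   generate all-ones in any VSX register; (const_int 5) on power8
   returns NO_REGS, forcing the constant to memory, while on power9 it
   returns ALTIVEC_REGS because XXSPLTIB can splat small immediates.  */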
20619 /* Debug version of rs6000_preferred_reload_class. */
20620 static enum reg_class
20621 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20623 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20625 fprintf (stderr,
20626 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20627 "mode = %s, x:\n",
20628 reg_class_names[ret], reg_class_names[rclass],
20629 GET_MODE_NAME (GET_MODE (x)));
20630 debug_rtx (x);
20632 return ret;
20635 /* If we are copying between FP or AltiVec registers and anything else, we need
20636 a memory location. The exception is when we are targeting ppc64 and the
20637 direct moves between fpr and gpr are available. Also, under VSX, you
20638 can copy vector registers from the FP register set to the Altivec register
20639 set and vice versa. */
20641 static bool
20642 rs6000_secondary_memory_needed (machine_mode mode,
20643 reg_class_t from_class,
20644 reg_class_t to_class)
20646 enum rs6000_reg_type from_type, to_type;
20647 bool altivec_p = ((from_class == ALTIVEC_REGS)
20648 || (to_class == ALTIVEC_REGS));
20650 /* If a simple/direct move is available, we don't need secondary memory. */
20651 from_type = reg_class_to_reg_type[(int)from_class];
20652 to_type = reg_class_to_reg_type[(int)to_class];
20654 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20655 (secondary_reload_info *)0, altivec_p))
20656 return false;
20658 /* If we have a floating point or vector register class, we need to use
20659 memory to transfer the data. */
20660 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20661 return true;
20663 return false;
20666 /* Debug version of rs6000_secondary_memory_needed. */
20667 static bool
20668 rs6000_debug_secondary_memory_needed (machine_mode mode,
20669 reg_class_t from_class,
20670 reg_class_t to_class)
20672 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20674 fprintf (stderr,
20675 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20676 "to_class = %s, mode = %s\n",
20677 ret ? "true" : "false",
20678 reg_class_names[from_class],
20679 reg_class_names[to_class],
20680 GET_MODE_NAME (mode));
20682 return ret;
20685 /* Return the register class of a scratch register needed to copy IN into
20686 or out of a register in RCLASS in MODE. If it can be done directly,
20687 NO_REGS is returned. */
20689 static enum reg_class
20690 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20691 rtx in)
20693 int regno;
20695 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20696 #if TARGET_MACHO
20697 && MACHOPIC_INDIRECT
20698 #endif
20701 /* We cannot copy a symbolic operand directly into anything
20702 other than BASE_REGS for TARGET_ELF. So indicate that a
20703 register from BASE_REGS is needed as an intermediate
20704 register.
20706 On Darwin, pic addresses require a load from memory, which
20707 needs a base register. */
20708 if (rclass != BASE_REGS
20709 && (GET_CODE (in) == SYMBOL_REF
20710 || GET_CODE (in) == HIGH
20711 || GET_CODE (in) == LABEL_REF
20712 || GET_CODE (in) == CONST))
20713 return BASE_REGS;
20716 if (GET_CODE (in) == REG)
20718 regno = REGNO (in);
20719 if (regno >= FIRST_PSEUDO_REGISTER)
20721 regno = true_regnum (in);
20722 if (regno >= FIRST_PSEUDO_REGISTER)
20723 regno = -1;
20726 else if (GET_CODE (in) == SUBREG)
20728 regno = true_regnum (in);
20729 if (regno >= FIRST_PSEUDO_REGISTER)
20730 regno = -1;
20732 else
20733 regno = -1;
20735 /* If we have VSX register moves, prefer moving scalar values between
20736 Altivec registers and GPRs by going via an FPR (and then via memory)
20737 instead of reloading the secondary memory address for Altivec moves. */
20738 if (TARGET_VSX
20739 && GET_MODE_SIZE (mode) < 16
20740 && !mode_supports_vmx_dform (mode)
20741 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20742 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20743 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20744 && (regno >= 0 && INT_REGNO_P (regno)))))
20745 return FLOAT_REGS;
20747 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20748 into anything. */
20749 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20750 || (regno >= 0 && INT_REGNO_P (regno)))
20751 return NO_REGS;
20753 /* Constants, memory, and VSX registers can go into VSX registers (both the
20754 traditional floating point and the altivec registers). */
20755 if (rclass == VSX_REGS
20756 && (regno == -1 || VSX_REGNO_P (regno)))
20757 return NO_REGS;
20759 /* Constants, memory, and FP registers can go into FP registers. */
20760 if ((regno == -1 || FP_REGNO_P (regno))
20761 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20762 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20764 /* Memory and AltiVec registers can go into AltiVec registers. */
20765 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20766 && rclass == ALTIVEC_REGS)
20767 return NO_REGS;
20769 /* We can copy among the CR registers. */
20770 if ((rclass == CR_REGS || rclass == CR0_REGS)
20771 && regno >= 0 && CR_REGNO_P (regno))
20772 return NO_REGS;
20774 /* Otherwise, we need GENERAL_REGS. */
20775 return GENERAL_REGS;
20778 /* Debug version of rs6000_secondary_reload_class. */
20779 static enum reg_class
20780 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20781 machine_mode mode, rtx in)
20783 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20784 fprintf (stderr,
20785 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20786 "mode = %s, input rtx:\n",
20787 reg_class_names[ret], reg_class_names[rclass],
20788 GET_MODE_NAME (mode));
20789 debug_rtx (in);
20791 return ret;
20794 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20796 static bool
20797 rs6000_can_change_mode_class (machine_mode from,
20798 machine_mode to,
20799 reg_class_t rclass)
20801 unsigned from_size = GET_MODE_SIZE (from);
20802 unsigned to_size = GET_MODE_SIZE (to);
20804 if (from_size != to_size)
20806 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20808 if (reg_classes_intersect_p (xclass, rclass))
20810 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20811 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20812 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20813 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20815 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20816 single register under VSX because the scalar part of the register
20817 is in the upper 64 bits, and not the lower 64 bits. Types like
20818 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20819 IEEE floating point can't overlap, and neither can small
20820 values. */
20822 if (to_float128_vector_p && from_float128_vector_p)
20823 return true;
20825 else if (to_float128_vector_p || from_float128_vector_p)
20826 return false;
20828 /* TDmode in floating-mode registers must always go into a register
20829 pair with the most significant word in the even-numbered register
20830 to match ISA requirements. In little-endian mode, this does not
20831 match subreg numbering, so we cannot allow subregs. */
20832 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20833 return false;
20835 if (from_size < 8 || to_size < 8)
20836 return false;
20838 if (from_size == 8 && (8 * to_nregs) != to_size)
20839 return false;
20841 if (to_size == 8 && (8 * from_nregs) != from_size)
20842 return false;
20844 return true;
20846 else
20847 return true;
20850 /* Since the VSX register set includes traditional floating point registers
20851 and altivec registers, just check for the size being different instead of
20852 trying to check whether the modes are vector modes. Otherwise it won't
20853 allow say DF and DI to change classes. For types like TFmode and TDmode
20854 that take 2 64-bit registers, rather than a single 128-bit register, don't
20855 allow subregs of those types to other 128-bit types. */
20856 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20858 unsigned num_regs = (from_size + 15) / 16;
20859 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20860 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20861 return false;
20863 return (from_size == 8 || from_size == 16);
20866 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20867 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20868 return false;
20870 return true;
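/* Concretely: (subreg:DF (reg:DI)) is allowed in VSX_REGS, since both
   modes occupy a single 8-byte register, but (subreg:DI (reg:V2DI)) is
   rejected above because the 16-byte vector lives in one register whose
   scalar part is the upper 64 bits.  */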
20873 /* Debug version of rs6000_can_change_mode_class. */
20874 static bool
20875 rs6000_debug_can_change_mode_class (machine_mode from,
20876 machine_mode to,
20877 reg_class_t rclass)
20879 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20881 fprintf (stderr,
20882 "rs6000_can_change_mode_class, return %s, from = %s, "
20883 "to = %s, rclass = %s\n",
20884 ret ? "true" : "false",
20885 GET_MODE_NAME (from), GET_MODE_NAME (to),
20886 reg_class_names[rclass]);
20888 return ret;
20891 /* Return a string to do a move operation of 128 bits of data. */
20893 const char *
20894 rs6000_output_move_128bit (rtx operands[])
20896 rtx dest = operands[0];
20897 rtx src = operands[1];
20898 machine_mode mode = GET_MODE (dest);
20899 int dest_regno;
20900 int src_regno;
20901 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20902 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20904 if (REG_P (dest))
20906 dest_regno = REGNO (dest);
20907 dest_gpr_p = INT_REGNO_P (dest_regno);
20908 dest_fp_p = FP_REGNO_P (dest_regno);
20909 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20910 dest_vsx_p = dest_fp_p | dest_vmx_p;
20912 else
20914 dest_regno = -1;
20915 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20918 if (REG_P (src))
20920 src_regno = REGNO (src);
20921 src_gpr_p = INT_REGNO_P (src_regno);
20922 src_fp_p = FP_REGNO_P (src_regno);
20923 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20924 src_vsx_p = src_fp_p | src_vmx_p;
20926 else
20928 src_regno = -1;
20929 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20932 /* Register moves. */
20933 if (dest_regno >= 0 && src_regno >= 0)
20935 if (dest_gpr_p)
20937 if (src_gpr_p)
20938 return "#";
20940 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20941 return (WORDS_BIG_ENDIAN
20942 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20943 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20945 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20946 return "#";
20949 else if (TARGET_VSX && dest_vsx_p)
20951 if (src_vsx_p)
20952 return "xxlor %x0,%x1,%x1";
20954 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20955 return (WORDS_BIG_ENDIAN
20956 ? "mtvsrdd %x0,%1,%L1"
20957 : "mtvsrdd %x0,%L1,%1");
20959 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20960 return "#";
20963 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20964 return "vor %0,%1,%1";
20966 else if (dest_fp_p && src_fp_p)
20967 return "#";
20970 /* Loads. */
20971 else if (dest_regno >= 0 && MEM_P (src))
20973 if (dest_gpr_p)
20975 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20976 return "lq %0,%1";
20977 else
20978 return "#";
20981 else if (TARGET_ALTIVEC && dest_vmx_p
20982 && altivec_indexed_or_indirect_operand (src, mode))
20983 return "lvx %0,%y1";
20985 else if (TARGET_VSX && dest_vsx_p)
20987 if (mode_supports_vsx_dform_quad (mode)
20988 && quad_address_p (XEXP (src, 0), mode, true))
20989 return "lxv %x0,%1";
20991 else if (TARGET_P9_VECTOR)
20992 return "lxvx %x0,%y1";
20994 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20995 return "lxvw4x %x0,%y1";
20997 else
20998 return "lxvd2x %x0,%y1";
21001 else if (TARGET_ALTIVEC && dest_vmx_p)
21002 return "lvx %0,%y1";
21004 else if (dest_fp_p)
21005 return "#";
21008 /* Stores. */
21009 else if (src_regno >= 0 && MEM_P (dest))
21011 if (src_gpr_p)
21013 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
21014 return "stq %1,%0";
21015 else
21016 return "#";
21019 else if (TARGET_ALTIVEC && src_vmx_p
21020 && altivec_indexed_or_indirect_operand (src, mode))
21021 return "stvx %1,%y0";
21023 else if (TARGET_VSX && src_vsx_p)
21025 if (mode_supports_vsx_dform_quad (mode)
21026 && quad_address_p (XEXP (dest, 0), mode, true))
21027 return "stxv %x1,%0";
21029 else if (TARGET_P9_VECTOR)
21030 return "stxvx %x1,%y0";
21032 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
21033 return "stxvw4x %x1,%y0";
21035 else
21036 return "stxvd2x %x1,%y0";
21039 else if (TARGET_ALTIVEC && src_vmx_p)
21040 return "stvx %1,%y0";
21042 else if (src_fp_p)
21043 return "#";
21046 /* Constants. */
21047 else if (dest_regno >= 0
21048 && (GET_CODE (src) == CONST_INT
21049 || GET_CODE (src) == CONST_WIDE_INT
21050 || GET_CODE (src) == CONST_DOUBLE
21051 || GET_CODE (src) == CONST_VECTOR))
21053 if (dest_gpr_p)
21054 return "#";
21056 else if ((dest_vmx_p && TARGET_ALTIVEC)
21057 || (dest_vsx_p && TARGET_VSX))
21058 return output_vec_const_move (operands);
21061 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
21064 /* Validate a 128-bit move: at least one operand must be a register. */
21065 bool
21066 rs6000_move_128bit_ok_p (rtx operands[])
21068 machine_mode mode = GET_MODE (operands[0]);
21069 return (gpc_reg_operand (operands[0], mode)
21070 || gpc_reg_operand (operands[1], mode));
21073 /* Return true if a 128-bit move needs to be split. */
21074 bool
21075 rs6000_split_128bit_ok_p (rtx operands[])
21077 if (!reload_completed)
21078 return false;
21080 if (!gpr_or_gpr_p (operands[0], operands[1]))
21081 return false;
21083 if (quad_load_store_p (operands[0], operands[1]))
21084 return false;
21086 return true;
21090 /* Given a comparison operation, return the bit number in CCR to test. We
21091 know this is a valid comparison.
21093 SCC_P is 1 if this is for an scc. That means that %D will have been
21094 used instead of %C, so the bits will be in different places.
21096 Return -1 if OP isn't a valid comparison for some reason. */
21099 ccr_bit (rtx op, int scc_p)
21101 enum rtx_code code = GET_CODE (op);
21102 machine_mode cc_mode;
21103 int cc_regnum;
21104 int base_bit;
21105 rtx reg;
21107 if (!COMPARISON_P (op))
21108 return -1;
21110 reg = XEXP (op, 0);
21112 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
21114 cc_mode = GET_MODE (reg);
21115 cc_regnum = REGNO (reg);
21116 base_bit = 4 * (cc_regnum - CR0_REGNO);
21118 validate_condition_mode (code, cc_mode);
21120 /* When generating a sCOND operation, only positive conditions are
21121 allowed. */
21122 gcc_assert (!scc_p
21123 || code == EQ || code == GT || code == LT || code == UNORDERED
21124 || code == GTU || code == LTU);
21126 switch (code)
21128 case NE:
21129 return scc_p ? base_bit + 3 : base_bit + 2;
21130 case EQ:
21131 return base_bit + 2;
21132 case GT: case GTU: case UNLE:
21133 return base_bit + 1;
21134 case LT: case LTU: case UNGE:
21135 return base_bit;
21136 case ORDERED: case UNORDERED:
21137 return base_bit + 3;
21139 case GE: case GEU:
21140 /* If scc, we will have done a cror to put the bit in the
21141 unordered position. So test that bit. For integer, this is ! LT
21142 unless this is an scc insn. */
21143 return scc_p ? base_bit + 3 : base_bit;
21145 case LE: case LEU:
21146 return scc_p ? base_bit + 3 : base_bit + 1;
21148 default:
21149 gcc_unreachable ();
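/* For example, with CR0_REGNO == 68, the comparison
   (gt (reg:CC 70) (const_int 0)) tests the GT bit of CR2:
   base_bit = 4 * (70 - 68) = 8 and ccr_bit returns 8 + 1 = 9; for the
   scc form, %J then prints 9 + 1 = 10 as the rlinm shift count.  */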
21153 /* Return the GOT register. */
21156 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21158 /* The second flow pass currently (June 1999) can't update
21159 regs_ever_live without disturbing other parts of the compiler, so
21160 update it here to make the prolog/epilogue code happy. */
21161 if (!can_create_pseudo_p ()
21162 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21163 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21165 crtl->uses_pic_offset_table = 1;
21167 return pic_offset_table_rtx;
21170 static rs6000_stack_t stack_info;
21172 /* Function to init struct machine_function.
21173 This will be called, via a pointer variable,
21174 from push_function_context. */
21176 static struct machine_function *
21177 rs6000_init_machine_status (void)
21179 stack_info.reload_completed = 0;
21180 return ggc_cleared_alloc<machine_function> ();
21183 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21185 /* Write out a function code label. */
21187 void
21188 rs6000_output_function_entry (FILE *file, const char *fname)
21190 if (fname[0] != '.')
21192 switch (DEFAULT_ABI)
21194 default:
21195 gcc_unreachable ();
21197 case ABI_AIX:
21198 if (DOT_SYMBOLS)
21199 putc ('.', file);
21200 else
21201 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21202 break;
21204 case ABI_ELFv2:
21205 case ABI_V4:
21206 case ABI_DARWIN:
21207 break;
21211 RS6000_OUTPUT_BASENAME (file, fname);
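/* E.g. on AIX with dot-symbols the entry label for "foo" is emitted as
   ".foo", while for ELFv2, V4, and Darwin the name is emitted
   unchanged.  */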
21214 /* Print an operand. Recognize special options, documented below. */
21216 #if TARGET_ELF
21217 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21218 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21219 #else
21220 #define SMALL_DATA_RELOC "sda21"
21221 #define SMALL_DATA_REG 0
21222 #endif
21224 void
21225 print_operand (FILE *file, rtx x, int code)
21227 int i;
21228 unsigned HOST_WIDE_INT uval;
21230 switch (code)
21232 /* %a is output_address. */
21234 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21235 output_operand. */
21237 case 'D':
21238 /* Like 'J' but get to the GT bit only. */
21239 gcc_assert (REG_P (x));
21241 /* Bit 1 is GT bit. */
21242 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21244 /* Add one for shift count in rlinm for scc. */
21245 fprintf (file, "%d", i + 1);
21246 return;
21248 case 'e':
21249 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21250 if (! INT_P (x))
21252 output_operand_lossage ("invalid %%e value");
21253 return;
21256 uval = INTVAL (x);
21257 if ((uval & 0xffff) == 0 && uval != 0)
21258 putc ('s', file);
21259 return;
21261 case 'E':
21262 /* X is a CR register. Print the number of the EQ bit of the CR. */
21263 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21264 output_operand_lossage ("invalid %%E value");
21265 else
21266 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21267 return;
21269 case 'f':
21270 /* X is a CR register. Print the shift count needed to move it
21271 to the high-order four bits. */
21272 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21273 output_operand_lossage ("invalid %%f value");
21274 else
21275 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21276 return;
21278 case 'F':
21279 /* Similar, but print the count for the rotate in the opposite
21280 direction. */
21281 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21282 output_operand_lossage ("invalid %%F value");
21283 else
21284 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21285 return;
21287 case 'G':
21288 /* X is a constant integer. If it is negative, print "m",
21289 otherwise print "z". This is to make an aze or ame insn. */
21290 if (GET_CODE (x) != CONST_INT)
21291 output_operand_lossage ("invalid %%G value");
21292 else if (INTVAL (x) >= 0)
21293 putc ('z', file);
21294 else
21295 putc ('m', file);
21296 return;
21298 case 'h':
21299 /* If constant, output low-order five bits. Otherwise, write
21300 normally. */
21301 if (INT_P (x))
21302 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21303 else
21304 print_operand (file, x, 0);
21305 return;
21307 case 'H':
21308 /* If constant, output low-order six bits. Otherwise, write
21309 normally. */
21310 if (INT_P (x))
21311 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21312 else
21313 print_operand (file, x, 0);
21314 return;
21316 case 'I':
21317 /* Print `i' if this is a constant, else nothing. */
21318 if (INT_P (x))
21319 putc ('i', file);
21320 return;
21322 case 'j':
21323 /* Write the bit number in CCR for jump. */
21324 i = ccr_bit (x, 0);
21325 if (i == -1)
21326 output_operand_lossage ("invalid %%j code");
21327 else
21328 fprintf (file, "%d", i);
21329 return;
21331 case 'J':
21332 /* Similar, but add one for shift count in rlinm for scc and pass
21333 scc flag to `ccr_bit'. */
21334 i = ccr_bit (x, 1);
21335 if (i == -1)
21336 output_operand_lossage ("invalid %%J code");
21337 else
21338 /* If we want bit 31, write a shift count of zero, not 32. */
21339 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21340 return;
21342 case 'k':
21343 /* X must be a constant. Write the one's complement of the
21344 constant. */
21345 if (! INT_P (x))
21346 output_operand_lossage ("invalid %%k value");
21347 else
21348 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21349 return;
21351 case 'K':
21352 /* X must be a symbolic constant on ELF. Write an
21353 expression suitable for an 'addi' that adds in the low 16
21354 bits of the MEM. */
21355 if (GET_CODE (x) == CONST)
21357 if (GET_CODE (XEXP (x, 0)) != PLUS
21358 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21359 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21360 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21361 output_operand_lossage ("invalid %%K value");
21363 print_operand_address (file, x);
21364 fputs ("@l", file);
21365 return;
21367 /* %l is output_asm_label. */
21369 case 'L':
21370 /* Write second word of DImode or DFmode reference. Works on register
21371 or non-indexed memory only. */
21372 if (REG_P (x))
21373 fputs (reg_names[REGNO (x) + 1], file);
21374 else if (MEM_P (x))
21376 machine_mode mode = GET_MODE (x);
21377 /* Handle possible auto-increment. Since it is pre-increment and
21378 we have already done it, we can just use an offset of one word. */
21379 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21380 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21381 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21382 UNITS_PER_WORD));
21383 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21384 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21385 UNITS_PER_WORD));
21386 else
21387 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21388 UNITS_PER_WORD),
21389 0));
21391 if (small_data_operand (x, GET_MODE (x)))
21392 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21393 reg_names[SMALL_DATA_REG]);
21395 return;
21397 case 'N': /* Unused */
21398 /* Write the number of elements in the vector times 4. */
21399 if (GET_CODE (x) != PARALLEL)
21400 output_operand_lossage ("invalid %%N value");
21401 else
21402 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21403 return;
21405 case 'O': /* Unused */
21406 /* Similar, but subtract 1 first. */
21407 if (GET_CODE (x) != PARALLEL)
21408 output_operand_lossage ("invalid %%O value");
21409 else
21410 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21411 return;
21413 case 'p':
21414 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21415 if (! INT_P (x)
21416 || INTVAL (x) < 0
21417 || (i = exact_log2 (INTVAL (x))) < 0)
21418 output_operand_lossage ("invalid %%p value");
21419 else
21420 fprintf (file, "%d", i);
21421 return;
21423 case 'P':
21424 /* The operand must be an indirect memory reference. The result
21425 is the register name. */
21426 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21427 || REGNO (XEXP (x, 0)) >= 32)
21428 output_operand_lossage ("invalid %%P value");
21429 else
21430 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21431 return;
21433 case 'q':
21434 /* This outputs the logical code corresponding to a boolean
21435 expression. The expression may have one or both operands
21436 negated (if one, only the first one). For condition register
21437 logical operations, it will also treat the negated
21438 CR codes as NOTs, but not handle NOTs of them. */
21440 const char *const *t = 0;
21441 const char *s;
21442 enum rtx_code code = GET_CODE (x);
21443 static const char * const tbl[3][3] = {
21444 { "and", "andc", "nor" },
21445 { "or", "orc", "nand" },
21446 { "xor", "eqv", "xor" } };
21448 if (code == AND)
21449 t = tbl[0];
21450 else if (code == IOR)
21451 t = tbl[1];
21452 else if (code == XOR)
21453 t = tbl[2];
21454 else
21455 output_operand_lossage ("invalid %%q value");
21457 if (GET_CODE (XEXP (x, 0)) != NOT)
21458 s = t[0];
21459 else
21461 if (GET_CODE (XEXP (x, 1)) == NOT)
21462 s = t[2];
21463 else
21464 s = t[1];
21467 fputs (s, file);
21469 return;
21471 case 'Q':
21472 if (! TARGET_MFCRF)
21473 return;
21474 fputc (',', file);
21475 /* FALLTHRU */
21477 case 'R':
21478 /* X is a CR register. Print the mask for `mtcrf'. */
21479 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21480 output_operand_lossage ("invalid %%R value");
21481 else
21482 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21483 return;
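      /* E.g. cr2 gives 128 >> 2 == 32, the single FXM mask bit that
         selects CR field 2 for mtcrf.  */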
21485 case 's':
21486 /* Low 5 bits of 32 - value */
21487 if (! INT_P (x))
21488 output_operand_lossage ("invalid %%s value");
21489 else
21490 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21491 return;
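      /* E.g. a value of 12 prints 20; the "& 31" keeps the result in the
         5-bit shift range, folding 32 - 0 to 0 rather than 32.  */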
21493 case 't':
21494 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21495 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21497 /* Bit 3 is OV bit. */
21498 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21500 /* If we want bit 31, write a shift count of zero, not 32. */
21501 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21502 return;
21504 case 'T':
21505 /* Print the symbolic name of a branch target register. */
21506 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21507 && REGNO (x) != CTR_REGNO))
21508 output_operand_lossage ("invalid %%T value");
21509 else if (REGNO (x) == LR_REGNO)
21510 fputs ("lr", file);
21511 else
21512 fputs ("ctr", file);
21513 return;
21515 case 'u':
21516 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21517 for use in unsigned operand. */
21518 if (! INT_P (x))
21520 output_operand_lossage ("invalid %%u value");
21521 return;
21524 uval = INTVAL (x);
21525 if ((uval & 0xffff) == 0)
21526 uval >>= 16;
21528 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21529 return;
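      /* Illustrative: for 0x56780000 the low halfword is zero, so the
         value is shifted down and 0x5678 is printed; for 0x00005678 the
         low halfword is already nonzero and 0x5678 is printed
         directly.  */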
21531 case 'v':
21532 /* High-order 16 bits of constant for use in signed operand. */
21533 if (! INT_P (x))
21534 output_operand_lossage ("invalid %%v value");
21535 else
21536 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21537 (INTVAL (x) >> 16) & 0xffff);
21538 return;
21540 case 'U':
21541 /* Print `u' if this has an auto-increment or auto-decrement. */
21542 if (MEM_P (x)
21543 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21544 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21545 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21546 putc ('u', file);
21547 return;
21549 case 'V':
21550 /* Print the trap code for this operand. */
21551 switch (GET_CODE (x))
21553 case EQ:
21554 fputs ("eq", file); /* 4 */
21555 break;
21556 case NE:
21557 fputs ("ne", file); /* 24 */
21558 break;
21559 case LT:
21560 fputs ("lt", file); /* 16 */
21561 break;
21562 case LE:
21563 fputs ("le", file); /* 20 */
21564 break;
21565 case GT:
21566 fputs ("gt", file); /* 8 */
21567 break;
21568 case GE:
21569 fputs ("ge", file); /* 12 */
21570 break;
21571 case LTU:
21572 fputs ("llt", file); /* 2 */
21573 break;
21574 case LEU:
21575 fputs ("lle", file); /* 6 */
21576 break;
21577 case GTU:
21578 fputs ("lgt", file); /* 1 */
21579 break;
21580 case GEU:
21581 fputs ("lge", file); /* 5 */
21582 break;
21583 default:
21584 gcc_unreachable ();
21586 break;
21588 case 'w':
21589 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21590 normally. */
21591 if (INT_P (x))
21592 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21593 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21594 else
21595 print_operand (file, x, 0);
21596 return;
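      /* The xor/subtract pair above is the usual branch-free idiom for
         sign-extending a 16-bit field: e.g. for a constant 0x1ffff,
         (0xffff ^ 0x8000) - 0x8000 == 0x7fff - 0x8000 == -1, the signed
         value of the low halfword.  */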
21598 case 'x':
21599 /* X is a FPR or Altivec register used in a VSX context. */
21600 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21601 output_operand_lossage ("invalid %%x value");
21602 else
21604 int reg = REGNO (x);
21605 int vsx_reg = (FP_REGNO_P (reg)
21606 ? reg - 32
21607 : reg - FIRST_ALTIVEC_REGNO + 32);
21609 #ifdef TARGET_REGNAMES
21610 if (TARGET_REGNAMES)
21611 fprintf (file, "%%vs%d", vsx_reg);
21612 else
21613 #endif
21614 fprintf (file, "%d", vsx_reg);
21616 return;
21618 case 'X':
21619 if (MEM_P (x)
21620 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21621 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21622 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21623 putc ('x', file);
21624 return;
21626 case 'Y':
21627 /* Like 'L', for third word of TImode/PTImode */
21628 if (REG_P (x))
21629 fputs (reg_names[REGNO (x) + 2], file);
21630 else if (MEM_P (x))
21632 machine_mode mode = GET_MODE (x);
21633 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21634 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21635 output_address (mode, plus_constant (Pmode,
21636 XEXP (XEXP (x, 0), 0), 8));
21637 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21638 output_address (mode, plus_constant (Pmode,
21639 XEXP (XEXP (x, 0), 0), 8));
21640 else
21641 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21642 if (small_data_operand (x, GET_MODE (x)))
21643 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21644 reg_names[SMALL_DATA_REG]);
21646 return;
21648 case 'z':
21649 /* X is a SYMBOL_REF. Write out the name preceded by a
21650 period and without any trailing data in brackets. Used for function
21651 names. If we are configured for System V (or the embedded ABI) on
21652 the PowerPC, do not emit the period, since those systems do not use
21653 TOCs and the like. */
21654 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21656 /* For macho, check to see if we need a stub. */
21657 if (TARGET_MACHO)
21659 const char *name = XSTR (x, 0);
21660 #if TARGET_MACHO
21661 if (darwin_emit_branch_islands
21662 && MACHOPIC_INDIRECT
21663 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21664 name = machopic_indirection_name (x, /*stub_p=*/true);
21665 #endif
21666 assemble_name (file, name);
21668 else if (!DOT_SYMBOLS)
21669 assemble_name (file, XSTR (x, 0));
21670 else
21671 rs6000_output_function_entry (file, XSTR (x, 0));
21672 return;
21674 case 'Z':
21675 /* Like 'L', for last word of TImode/PTImode. */
21676 if (REG_P (x))
21677 fputs (reg_names[REGNO (x) + 3], file);
21678 else if (MEM_P (x))
21680 machine_mode mode = GET_MODE (x);
21681 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21682 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21683 output_address (mode, plus_constant (Pmode,
21684 XEXP (XEXP (x, 0), 0), 12));
21685 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21686 output_address (mode, plus_constant (Pmode,
21687 XEXP (XEXP (x, 0), 0), 12));
21688 else
21689 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21690 if (small_data_operand (x, GET_MODE (x)))
21691 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21692 reg_names[SMALL_DATA_REG]);
21694 return;
21696 /* Print AltiVec memory operand. */
21697 case 'y':
21699 rtx tmp;
21701 gcc_assert (MEM_P (x));
21703 tmp = XEXP (x, 0);
21705 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21706 && GET_CODE (tmp) == AND
21707 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21708 && INTVAL (XEXP (tmp, 1)) == -16)
21709 tmp = XEXP (tmp, 0);
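      /* The AND with -16 above clears the low four address bits; lvx/stvx
         ignore those bits anyway, so the explicit alignment mask can
         simply be peeled off here.  */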
21710 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21711 && GET_CODE (tmp) == PRE_MODIFY)
21712 tmp = XEXP (tmp, 1);
21713 if (REG_P (tmp))
21714 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21715 else
21717 if (GET_CODE (tmp) != PLUS
21718 || !REG_P (XEXP (tmp, 0))
21719 || !REG_P (XEXP (tmp, 1)))
21721 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21722 break;
21725 if (REGNO (XEXP (tmp, 0)) == 0)
21726 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21727 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21728 else
21729 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21730 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21732 break;
21735 case 0:
21736 if (REG_P (x))
21737 fprintf (file, "%s", reg_names[REGNO (x)]);
21738 else if (MEM_P (x))
21740 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21741 know the width from the mode. */
21742 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21743 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21744 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21745 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21746 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21747 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21748 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21749 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21750 else
21751 output_address (GET_MODE (x), XEXP (x, 0));
21753 else
21755 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21756 /* This hack along with a corresponding hack in
21757 rs6000_output_addr_const_extra arranges to output addends
21758 where the assembler expects to find them. eg.
21759 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21760 without this hack would be output as "x@toc+4". We
21761 want "x+4@toc". */
21762 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21763 else
21764 output_addr_const (file, x);
21766 return;
21768 case '&':
21769 if (const char *name = get_some_local_dynamic_name ())
21770 assemble_name (file, name);
21771 else
21772 output_operand_lossage ("'%%&' used without any "
21773 "local dynamic TLS references");
21774 return;
21776 default:
21777 output_operand_lossage ("invalid %%xn code");
21781 /* Print the address of an operand. */
21783 void
21784 print_operand_address (FILE *file, rtx x)
21786 if (REG_P (x))
21787 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21788 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21789 || GET_CODE (x) == LABEL_REF)
21791 output_addr_const (file, x);
21792 if (small_data_operand (x, GET_MODE (x)))
21793 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21794 reg_names[SMALL_DATA_REG]);
21795 else
21796 gcc_assert (!TARGET_TOC);
21798 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21799 && REG_P (XEXP (x, 1)))
21801 if (REGNO (XEXP (x, 0)) == 0)
21802 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21803 reg_names[ REGNO (XEXP (x, 0)) ]);
21804 else
21805 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21806 reg_names[ REGNO (XEXP (x, 1)) ]);
21808 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21809 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21810 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21811 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21812 #if TARGET_MACHO
21813 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21814 && CONSTANT_P (XEXP (x, 1)))
21816 fprintf (file, "lo16(");
21817 output_addr_const (file, XEXP (x, 1));
21818 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21820 #endif
21821 #if TARGET_ELF
21822 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21823 && CONSTANT_P (XEXP (x, 1)))
21825 output_addr_const (file, XEXP (x, 1));
21826 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21828 #endif
21829 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21831 /* This hack along with a corresponding hack in
21832 rs6000_output_addr_const_extra arranges to output addends
21833 where the assembler expects to find them. eg.
21834 (lo_sum (reg 9)
21835 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21836 without this hack would be output as "x@toc+8@l(9)". We
21837 want "x+8@toc@l(9)". */
21838 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21839 if (GET_CODE (x) == LO_SUM)
21840 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21841 else
21842 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21844 else
21845 gcc_unreachable ();
21848 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21850 static bool
21851 rs6000_output_addr_const_extra (FILE *file, rtx x)
21853 if (GET_CODE (x) == UNSPEC)
21854 switch (XINT (x, 1))
21856 case UNSPEC_TOCREL:
21857 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21858 && REG_P (XVECEXP (x, 0, 1))
21859 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21860 output_addr_const (file, XVECEXP (x, 0, 0));
21861 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21863 if (INTVAL (tocrel_offset_oac) >= 0)
21864 fprintf (file, "+");
21865 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21867 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21869 putc ('-', file);
21870 assemble_name (file, toc_label_name);
21871 need_toc_init = 1;
21873 else if (TARGET_ELF)
21874 fputs ("@toc", file);
21875 return true;
21877 #if TARGET_MACHO
21878 case UNSPEC_MACHOPIC_OFFSET:
21879 output_addr_const (file, XVECEXP (x, 0, 0));
21880 putc ('-', file);
21881 machopic_output_function_base_name (file);
21882 return true;
21883 #endif
21885 return false;
21888 /* Target hook for assembling integer objects. The PowerPC version has
21889 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21890 is defined. It also needs to handle DI-mode objects on 64-bit
21891 targets. */
21893 static bool
21894 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21896 #ifdef RELOCATABLE_NEEDS_FIXUP
21897 /* Special handling for SI values. */
21898 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21900 static int recurse = 0;
21902 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21903 the .fixup section. Since the TOC section is already relocated, we
21904 don't need to mark it here. We used to skip the text section, but it
21905 should never be valid for relocated addresses to be placed in the text
21906 section. */
21907 if (DEFAULT_ABI == ABI_V4
21908 && (TARGET_RELOCATABLE || flag_pic > 1)
21909 && in_section != toc_section
21910 && !recurse
21911 && !CONST_SCALAR_INT_P (x)
21912 && CONSTANT_P (x))
21914 char buf[256];
21916 recurse = 1;
21917 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21918 fixuplabelno++;
21919 ASM_OUTPUT_LABEL (asm_out_file, buf);
21920 fprintf (asm_out_file, "\t.long\t(");
21921 output_addr_const (asm_out_file, x);
21922 fprintf (asm_out_file, ")@fixup\n");
21923 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21924 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21925 fprintf (asm_out_file, "\t.long\t");
21926 assemble_name (asm_out_file, buf);
21927 fprintf (asm_out_file, "\n\t.previous\n");
21928 recurse = 0;
21929 return true;
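      /* For illustration, with a hypothetical label .LCP0 the sequence
         emitted above looks like:
             .LCP0:
                     .long   (sym)@fixup
                     .section ".fixup","aw"
                     .align  2
                     .long   .LCP0
                     .previous
         so startup code can locate and relocate the word at .LCP0.  */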
21931 /* Remove initial .'s to turn a -mcall-aixdesc function
21932 address into the address of the descriptor, not the function
21933 itself. */
21934 else if (GET_CODE (x) == SYMBOL_REF
21935 && XSTR (x, 0)[0] == '.'
21936 && DEFAULT_ABI == ABI_AIX)
21938 const char *name = XSTR (x, 0);
21939 while (*name == '.')
21940 name++;
21942 fprintf (asm_out_file, "\t.long\t%s\n", name);
21943 return true;
21946 #endif /* RELOCATABLE_NEEDS_FIXUP */
21947 return default_assemble_integer (x, size, aligned_p);
21950 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21951 /* Emit an assembler directive to set symbol visibility for DECL to
21952 VISIBILITY_TYPE. */
21954 static void
21955 rs6000_assemble_visibility (tree decl, int vis)
21957 if (TARGET_XCOFF)
21958 return;
21960 /* Functions need to have their entry point symbol visibility set as
21961 well as their descriptor symbol visibility. */
21962 if (DEFAULT_ABI == ABI_AIX
21963 && DOT_SYMBOLS
21964 && TREE_CODE (decl) == FUNCTION_DECL)
21966 static const char * const visibility_types[] = {
21967 NULL, "protected", "hidden", "internal"
21970 const char *name, *type;
21972 name = ((* targetm.strip_name_encoding)
21973 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21974 type = visibility_types[vis];
21976 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21977 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21979 else
21980 default_assemble_visibility (decl, vis);
21982 #endif
21984 enum rtx_code
21985 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21987 /* Reversal of FP compares takes care -- an ordered compare
21988 becomes an unordered compare and vice versa. */
21989 if (mode == CCFPmode
21990 && (!flag_finite_math_only
21991 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21992 || code == UNEQ || code == LTGT))
21993 return reverse_condition_maybe_unordered (code);
21994 else
21995 return reverse_condition (code);
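/* For example, with CCFPmode and NaNs honored, reversing GE yields UNLT
   rather than LT: GE is false for unordered operands, so the reversed
   test must be true for them as well.  */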
21998 /* Generate a compare for CODE. Return a brand-new rtx that
21999 represents the result of the compare. */
22001 static rtx
22002 rs6000_generate_compare (rtx cmp, machine_mode mode)
22004 machine_mode comp_mode;
22005 rtx compare_result;
22006 enum rtx_code code = GET_CODE (cmp);
22007 rtx op0 = XEXP (cmp, 0);
22008 rtx op1 = XEXP (cmp, 1);
22010 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22011 comp_mode = CCmode;
22012 else if (FLOAT_MODE_P (mode))
22013 comp_mode = CCFPmode;
22014 else if (code == GTU || code == LTU
22015 || code == GEU || code == LEU)
22016 comp_mode = CCUNSmode;
22017 else if ((code == EQ || code == NE)
22018 && unsigned_reg_p (op0)
22019 && (unsigned_reg_p (op1)
22020 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
22021 /* These are unsigned values, perhaps there will be a later
22022 ordering compare that can be shared with this one. */
22023 comp_mode = CCUNSmode;
22024 else
22025 comp_mode = CCmode;
22027 /* If we have an unsigned compare, make sure we don't have a signed value as
22028 an immediate. */
22029 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
22030 && INTVAL (op1) < 0)
22032 op0 = copy_rtx_if_shared (op0);
22033 op1 = force_reg (GET_MODE (op0), op1);
22034 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
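  /* For instance, an unsigned compare of r3 against (const_int -1) would
     otherwise be emitted with a sign-extended 16-bit immediate; forcing
     -1 into a register lets the comparison see the full unsigned
     value.  */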
22037 /* First, the compare. */
22038 compare_result = gen_reg_rtx (comp_mode);
22040 /* IEEE 128-bit support in VSX registers when we do not have hardware
22041 support. */
22042 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22044 rtx libfunc = NULL_RTX;
22045 bool check_nan = false;
22046 rtx dest;
22048 switch (code)
22050 case EQ:
22051 case NE:
22052 libfunc = optab_libfunc (eq_optab, mode);
22053 break;
22055 case GT:
22056 case GE:
22057 libfunc = optab_libfunc (ge_optab, mode);
22058 break;
22060 case LT:
22061 case LE:
22062 libfunc = optab_libfunc (le_optab, mode);
22063 break;
22065 case UNORDERED:
22066 case ORDERED:
22067 libfunc = optab_libfunc (unord_optab, mode);
22068 code = (code == UNORDERED) ? NE : EQ;
22069 break;
22071 case UNGE:
22072 case UNGT:
22073 check_nan = true;
22074 libfunc = optab_libfunc (ge_optab, mode);
22075 code = (code == UNGE) ? GE : GT;
22076 break;
22078 case UNLE:
22079 case UNLT:
22080 check_nan = true;
22081 libfunc = optab_libfunc (le_optab, mode);
22082 code = (code == UNLE) ? LE : LT;
22083 break;
22085 case UNEQ:
22086 case LTGT:
22087 check_nan = true;
22088 libfunc = optab_libfunc (eq_optab, mode);
22089 code = (code == UNEQ) ? EQ : NE;
22090 break;
22092 default:
22093 gcc_unreachable ();
22096 gcc_assert (libfunc);
22098 if (!check_nan)
22099 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22100 SImode, op0, mode, op1, mode);
22102 /* The library signals an exception for signalling NaNs, so we need to
22103 handle isgreater, etc. by first checking isordered. */
22104 else
22106 rtx ne_rtx, normal_dest, unord_dest;
22107 rtx unord_func = optab_libfunc (unord_optab, mode);
22108 rtx join_label = gen_label_rtx ();
22109 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22110 rtx unord_cmp = gen_reg_rtx (comp_mode);
22113 /* Test for either value being a NaN. */
22114 gcc_assert (unord_func);
22115 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22116 SImode, op0, mode, op1, mode);
22118 /* Set the value to 1 if either value is a NaN, and jump to the join
22119 label.  */
22120 dest = gen_reg_rtx (SImode);
22121 emit_move_insn (dest, const1_rtx);
22122 emit_insn (gen_rtx_SET (unord_cmp,
22123 gen_rtx_COMPARE (comp_mode, unord_dest,
22124 const0_rtx)));
22126 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22127 emit_jump_insn (gen_rtx_SET (pc_rtx,
22128 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22129 join_ref,
22130 pc_rtx)));
22132 /* Do the normal comparison, knowing that the values are not
22133 NaNs. */
22134 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22135 SImode, op0, mode, op1, mode);
22137 emit_insn (gen_cstoresi4 (dest,
22138 gen_rtx_fmt_ee (code, SImode, normal_dest,
22139 const0_rtx),
22140 normal_dest, const0_rtx));
22142 /* Join NaN and non-NaN paths.  Compare dest against 0.  */
22143 emit_label (join_label);
22144 code = NE;
22147 emit_insn (gen_rtx_SET (compare_result,
22148 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22151 else
22153 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22154 CLOBBERs to match cmptf_internal2 pattern. */
22155 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22156 && FLOAT128_IBM_P (GET_MODE (op0))
22157 && TARGET_HARD_FLOAT)
22158 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22159 gen_rtvec (10,
22160 gen_rtx_SET (compare_result,
22161 gen_rtx_COMPARE (comp_mode, op0, op1)),
22162 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22163 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22164 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22165 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22166 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22167 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22168 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22169 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22170 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22171 else if (GET_CODE (op1) == UNSPEC
22172 && XINT (op1, 1) == UNSPEC_SP_TEST)
22174 rtx op1b = XVECEXP (op1, 0, 0);
22175 comp_mode = CCEQmode;
22176 compare_result = gen_reg_rtx (CCEQmode);
22177 if (TARGET_64BIT)
22178 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22179 else
22180 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22182 else
22183 emit_insn (gen_rtx_SET (compare_result,
22184 gen_rtx_COMPARE (comp_mode, op0, op1)));
22187 /* Some kinds of FP comparisons need an OR operation;
22188 under flag_finite_math_only we don't bother. */
22189 if (FLOAT_MODE_P (mode)
22190 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22191 && !flag_finite_math_only
22192 && (code == LE || code == GE
22193 || code == UNEQ || code == LTGT
22194 || code == UNGT || code == UNLT))
22196 enum rtx_code or1, or2;
22197 rtx or1_rtx, or2_rtx, compare2_rtx;
22198 rtx or_result = gen_reg_rtx (CCEQmode);
22200 switch (code)
22202 case LE: or1 = LT; or2 = EQ; break;
22203 case GE: or1 = GT; or2 = EQ; break;
22204 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22205 case LTGT: or1 = LT; or2 = GT; break;
22206 case UNGT: or1 = UNORDERED; or2 = GT; break;
22207 case UNLT: or1 = UNORDERED; or2 = LT; break;
22208 default: gcc_unreachable ();
22210 validate_condition_mode (or1, comp_mode);
22211 validate_condition_mode (or2, comp_mode);
22212 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22213 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22214 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22215 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22216 const_true_rtx);
22217 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22219 compare_result = or_result;
22220 code = EQ;
22223 validate_condition_mode (code, GET_MODE (compare_result));
22225 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22229 /* Return the diagnostic message string if the binary operation OP is
22230 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22232 static const char*
22233 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22234 const_tree type1,
22235 const_tree type2)
22237 machine_mode mode1 = TYPE_MODE (type1);
22238 machine_mode mode2 = TYPE_MODE (type2);
22240 /* For complex modes, use the inner type. */
22241 if (COMPLEX_MODE_P (mode1))
22242 mode1 = GET_MODE_INNER (mode1);
22244 if (COMPLEX_MODE_P (mode2))
22245 mode2 = GET_MODE_INNER (mode2);
22247 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22248 double to intermix unless -mfloat128-convert. */
22249 if (mode1 == mode2)
22250 return NULL;
22252 if (!TARGET_FLOAT128_CVT)
22254 if ((mode1 == KFmode && mode2 == IFmode)
22255 || (mode1 == IFmode && mode2 == KFmode))
22256 return N_("__float128 and __ibm128 cannot be used in the same "
22257 "expression");
22259 if (TARGET_IEEEQUAD
22260 && ((mode1 == IFmode && mode2 == TFmode)
22261 || (mode1 == TFmode && mode2 == IFmode)))
22262 return N_("__ibm128 and long double cannot be used in the same "
22263 "expression");
22265 if (!TARGET_IEEEQUAD
22266 && ((mode1 == KFmode && mode2 == TFmode)
22267 || (mode1 == TFmode && mode2 == KFmode)))
22268 return N_("__float128 and long double cannot be used in the same "
22269 "expression");
22272 return NULL;
22276 /* Expand floating point conversion to/from __float128 and __ibm128. */
22278 void
22279 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22281 machine_mode dest_mode = GET_MODE (dest);
22282 machine_mode src_mode = GET_MODE (src);
22283 convert_optab cvt = unknown_optab;
22284 bool do_move = false;
22285 rtx libfunc = NULL_RTX;
22286 rtx dest2;
22287 typedef rtx (*rtx_2func_t) (rtx, rtx);
22288 rtx_2func_t hw_convert = (rtx_2func_t)0;
22289 size_t kf_or_tf;
22291 struct hw_conv_t {
22292 rtx_2func_t from_df;
22293 rtx_2func_t from_sf;
22294 rtx_2func_t from_si_sign;
22295 rtx_2func_t from_si_uns;
22296 rtx_2func_t from_di_sign;
22297 rtx_2func_t from_di_uns;
22298 rtx_2func_t to_df;
22299 rtx_2func_t to_sf;
22300 rtx_2func_t to_si_sign;
22301 rtx_2func_t to_si_uns;
22302 rtx_2func_t to_di_sign;
22303 rtx_2func_t to_di_uns;
22304 } hw_conversions[2] = {
22305 /* conversions to/from KFmode */
22307 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22308 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22309 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22310 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22311 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22312 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22313 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22314 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22315 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22316 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22317 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22318 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22321 /* conversions to/from TFmode */
22323 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22324 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22325 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22326 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22327 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22328 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22329 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22330 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22331 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22332 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22333 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22334 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22338 if (dest_mode == src_mode)
22339 gcc_unreachable ();
22341 /* Eliminate memory operations. */
22342 if (MEM_P (src))
22343 src = force_reg (src_mode, src);
22345 if (MEM_P (dest))
22347 rtx tmp = gen_reg_rtx (dest_mode);
22348 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22349 rs6000_emit_move (dest, tmp, dest_mode);
22350 return;
22353 /* Convert to IEEE 128-bit floating point. */
22354 if (FLOAT128_IEEE_P (dest_mode))
22356 if (dest_mode == KFmode)
22357 kf_or_tf = 0;
22358 else if (dest_mode == TFmode)
22359 kf_or_tf = 1;
22360 else
22361 gcc_unreachable ();
22363 switch (src_mode)
22365 case E_DFmode:
22366 cvt = sext_optab;
22367 hw_convert = hw_conversions[kf_or_tf].from_df;
22368 break;
22370 case E_SFmode:
22371 cvt = sext_optab;
22372 hw_convert = hw_conversions[kf_or_tf].from_sf;
22373 break;
22375 case E_KFmode:
22376 case E_IFmode:
22377 case E_TFmode:
22378 if (FLOAT128_IBM_P (src_mode))
22379 cvt = sext_optab;
22380 else
22381 do_move = true;
22382 break;
22384 case E_SImode:
22385 if (unsigned_p)
22387 cvt = ufloat_optab;
22388 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22390 else
22392 cvt = sfloat_optab;
22393 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22395 break;
22397 case E_DImode:
22398 if (unsigned_p)
22400 cvt = ufloat_optab;
22401 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22403 else
22405 cvt = sfloat_optab;
22406 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22408 break;
22410 default:
22411 gcc_unreachable ();
22415 /* Convert from IEEE 128-bit floating point. */
22416 else if (FLOAT128_IEEE_P (src_mode))
22418 if (src_mode == KFmode)
22419 kf_or_tf = 0;
22420 else if (src_mode == TFmode)
22421 kf_or_tf = 1;
22422 else
22423 gcc_unreachable ();
22425 switch (dest_mode)
22427 case E_DFmode:
22428 cvt = trunc_optab;
22429 hw_convert = hw_conversions[kf_or_tf].to_df;
22430 break;
22432 case E_SFmode:
22433 cvt = trunc_optab;
22434 hw_convert = hw_conversions[kf_or_tf].to_sf;
22435 break;
22437 case E_KFmode:
22438 case E_IFmode:
22439 case E_TFmode:
22440 if (FLOAT128_IBM_P (dest_mode))
22441 cvt = trunc_optab;
22442 else
22443 do_move = true;
22444 break;
22446 case E_SImode:
22447 if (unsigned_p)
22449 cvt = ufix_optab;
22450 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22452 else
22454 cvt = sfix_optab;
22455 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22457 break;
22459 case E_DImode:
22460 if (unsigned_p)
22462 cvt = ufix_optab;
22463 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22465 else
22467 cvt = sfix_optab;
22468 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22470 break;
22472 default:
22473 gcc_unreachable ();
22477 /* Both IBM format. */
22478 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22479 do_move = true;
22481 else
22482 gcc_unreachable ();
22484 /* Handle conversion between TFmode/KFmode. */
22485 if (do_move)
22486 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22488 /* Handle conversion if we have hardware support. */
22489 else if (TARGET_FLOAT128_HW && hw_convert)
22490 emit_insn ((hw_convert) (dest, src));
22492 /* Call an external function to do the conversion. */
22493 else if (cvt != unknown_optab)
22495 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22496 gcc_assert (libfunc != NULL_RTX);
22498 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22499 src, src_mode);
22501 gcc_assert (dest2 != NULL_RTX);
22502 if (!rtx_equal_p (dest, dest2))
22503 emit_move_insn (dest, dest2);
22506 else
22507 gcc_unreachable ();
22509 return;
22513 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22514 can be used as that dest register. Return the dest register. */
22517 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22519 if (op2 == const0_rtx)
22520 return op1;
22522 if (GET_CODE (scratch) == SCRATCH)
22523 scratch = gen_reg_rtx (mode);
22525 if (logical_operand (op2, mode))
22526 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22527 else
22528 emit_insn (gen_rtx_SET (scratch,
22529 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22531 return scratch;
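/* E.g. for OP1 = r3 and OP2 = (const_int 17), a logical_operand, this
   emits an XOR such as "xori scratch,r3,17"; the caller then tests
   SCRATCH against zero, since r3 == 17 exactly when r3 ^ 17 == 0.  The
   PLUS/negate form covers values that do not fit a logical
   immediate.  */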
22534 void
22535 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22537 rtx condition_rtx;
22538 machine_mode op_mode;
22539 enum rtx_code cond_code;
22540 rtx result = operands[0];
22542 condition_rtx = rs6000_generate_compare (operands[1], mode);
22543 cond_code = GET_CODE (condition_rtx);
22545 if (cond_code == NE
22546 || cond_code == GE || cond_code == LE
22547 || cond_code == GEU || cond_code == LEU
22548 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22550 rtx not_result = gen_reg_rtx (CCEQmode);
22551 rtx not_op, rev_cond_rtx;
22552 machine_mode cc_mode;
22554 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22556 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22557 SImode, XEXP (condition_rtx, 0), const0_rtx);
22558 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22559 emit_insn (gen_rtx_SET (not_result, not_op));
22560 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22563 op_mode = GET_MODE (XEXP (operands[1], 0));
22564 if (op_mode == VOIDmode)
22565 op_mode = GET_MODE (XEXP (operands[1], 1));
22567 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22569 PUT_MODE (condition_rtx, DImode);
22570 convert_move (result, condition_rtx, 0);
22572 else
22574 PUT_MODE (condition_rtx, SImode);
22575 emit_insn (gen_rtx_SET (result, condition_rtx));
22579 /* Emit a branch of kind CODE to location LOC. */
22581 void
22582 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22584 rtx condition_rtx, loc_ref;
22586 condition_rtx = rs6000_generate_compare (operands[0], mode);
22587 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22588 emit_jump_insn (gen_rtx_SET (pc_rtx,
22589 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22590 loc_ref, pc_rtx)));
22593 /* Return the string to output a conditional branch to LABEL, which is
22594 the operand template of the label, or NULL if the branch is really a
22595 conditional return.
22597 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22598 condition code register and its mode specifies what kind of
22599 comparison we made.
22601 REVERSED is nonzero if we should reverse the sense of the comparison.
22603 INSN is the insn. */
22605 char *
22606 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22608 static char string[64];
22609 enum rtx_code code = GET_CODE (op);
22610 rtx cc_reg = XEXP (op, 0);
22611 machine_mode mode = GET_MODE (cc_reg);
22612 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22613 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22614 int really_reversed = reversed ^ need_longbranch;
22615 char *s = string;
22616 const char *ccode;
22617 const char *pred;
22618 rtx note;
22620 validate_condition_mode (code, mode);
22622 /* Work out which way this really branches. We could use
22623 reverse_condition_maybe_unordered here always but this
22624 makes the resulting assembler clearer. */
22625 if (really_reversed)
22627 /* Reversal of FP compares takes care -- an ordered compare
22628 becomes an unordered compare and vice versa. */
22629 if (mode == CCFPmode)
22630 code = reverse_condition_maybe_unordered (code);
22631 else
22632 code = reverse_condition (code);
22635 switch (code)
22637 /* Not all of these are actually distinct opcodes, but
22638 we distinguish them for clarity of the resulting assembler. */
22639 case NE: case LTGT:
22640 ccode = "ne"; break;
22641 case EQ: case UNEQ:
22642 ccode = "eq"; break;
22643 case GE: case GEU:
22644 ccode = "ge"; break;
22645 case GT: case GTU: case UNGT:
22646 ccode = "gt"; break;
22647 case LE: case LEU:
22648 ccode = "le"; break;
22649 case LT: case LTU: case UNLT:
22650 ccode = "lt"; break;
22651 case UNORDERED: ccode = "un"; break;
22652 case ORDERED: ccode = "nu"; break;
22653 case UNGE: ccode = "nl"; break;
22654 case UNLE: ccode = "ng"; break;
22655 default:
22656 gcc_unreachable ();
22659 /* Maybe we have a guess as to how likely the branch is. */
22660 pred = "";
22661 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22662 if (note != NULL_RTX)
22664 /* PROB is the difference from 50%. */
22665 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22666 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22668 /* Only hint for highly probable/improbable branches on newer cpus when
22669 we have real profile data, as static prediction overrides processor
22670 dynamic prediction. For older cpus we may as well always hint, but
22671 assume not taken for branches that are very close to 50% as a
22672 mispredicted taken branch is more expensive than a
22673 mispredicted not-taken branch. */
22674 if (rs6000_always_hint
22675 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22676 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22677 && br_prob_note_reliable_p (note)))
22679 if (abs (prob) > REG_BR_PROB_BASE / 20
22680 && ((prob > 0) ^ need_longbranch))
22681 pred = "+";
22682 else
22683 pred = "-";
22687 if (label == NULL)
22688 s += sprintf (s, "b%slr%s ", ccode, pred);
22689 else
22690 s += sprintf (s, "b%s%s ", ccode, pred);
22692 /* We need to escape any '%' characters in the reg_names string.
22693 Assume they'd only be the first character.... */
22694 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22695 *s++ = '%';
22696 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22698 if (label != NULL)
22700 /* If the branch distance was too far, we may have to use an
22701 unconditional branch to go the distance. */
22702 if (need_longbranch)
22703 s += sprintf (s, ",$+8\n\tb %s", label);
22704 else
22705 s += sprintf (s, ",%s", label);
22708 return string;
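/* Typical results (label and register spellings illustrative): a short,
   likely-taken EQ test on cr0 yields "beq+ 0,.L2" ("%cr0" under
   -mregnames), while an out-of-range branch is reversed and emitted as
   "bne- 0,$+8\n\tb .L2".  */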
22711 /* Return insn for VSX or Altivec comparisons. */
22713 static rtx
22714 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22716 rtx mask;
22717 machine_mode mode = GET_MODE (op0);
22719 switch (code)
22721 default:
22722 break;
22724 case GE:
22725 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22726 return NULL_RTX;
22727 /* FALLTHRU */
22729 case EQ:
22730 case GT:
22731 case GTU:
22732 case ORDERED:
22733 case UNORDERED:
22734 case UNEQ:
22735 case LTGT:
22736 mask = gen_reg_rtx (mode);
22737 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22738 return mask;
22741 return NULL_RTX;
22744 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22745 DMODE is expected destination mode. This is a recursive function. */
22747 static rtx
22748 rs6000_emit_vector_compare (enum rtx_code rcode,
22749 rtx op0, rtx op1,
22750 machine_mode dmode)
22752 rtx mask;
22753 bool swap_operands = false;
22754 bool try_again = false;
22756 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22757 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22759 /* See if the comparison works as is. */
22760 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22761 if (mask)
22762 return mask;
22764 switch (rcode)
22766 case LT:
22767 rcode = GT;
22768 swap_operands = true;
22769 try_again = true;
22770 break;
22771 case LTU:
22772 rcode = GTU;
22773 swap_operands = true;
22774 try_again = true;
22775 break;
22776 case NE:
22777 case UNLE:
22778 case UNLT:
22779 case UNGE:
22780 case UNGT:
22781 /* Invert condition and try again.
22782 e.g., A != B becomes ~(A==B). */
22784 enum rtx_code rev_code;
22785 enum insn_code nor_code;
22786 rtx mask2;
22788 rev_code = reverse_condition_maybe_unordered (rcode);
22789 if (rev_code == UNKNOWN)
22790 return NULL_RTX;
22792 nor_code = optab_handler (one_cmpl_optab, dmode);
22793 if (nor_code == CODE_FOR_nothing)
22794 return NULL_RTX;
22796 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22797 if (!mask2)
22798 return NULL_RTX;
22800 mask = gen_reg_rtx (dmode);
22801 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22802 return mask;
22804 break;
22805 case GE:
22806 case GEU:
22807 case LE:
22808 case LEU:
22809 /* Try GT/GTU/LT/LTU OR EQ */
22811 rtx c_rtx, eq_rtx;
22812 enum insn_code ior_code;
22813 enum rtx_code new_code;
22815 switch (rcode)
22817 case GE:
22818 new_code = GT;
22819 break;
22821 case GEU:
22822 new_code = GTU;
22823 break;
22825 case LE:
22826 new_code = LT;
22827 break;
22829 case LEU:
22830 new_code = LTU;
22831 break;
22833 default:
22834 gcc_unreachable ();
22837 ior_code = optab_handler (ior_optab, dmode);
22838 if (ior_code == CODE_FOR_nothing)
22839 return NULL_RTX;
22841 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22842 if (!c_rtx)
22843 return NULL_RTX;
22845 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22846 if (!eq_rtx)
22847 return NULL_RTX;
22849 mask = gen_reg_rtx (dmode);
22850 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22851 return mask;
22853 break;
22854 default:
22855 return NULL_RTX;
22858 if (try_again)
22860 if (swap_operands)
22861 std::swap (op0, op1);
22863 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22864 if (mask)
22865 return mask;
22868 /* You only get two chances. */
22869 return NULL_RTX;
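/* E.g. V4SI "a <= b" has no single instruction: the GE/GEU/LE/LEU arm
   recurses to build the (a < b) and (a == b) masks and IORs them, while
   "a != b" builds the (a == b) mask and complements it.  */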
22872 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22873 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22874 operands for the relation operation COND. */
22877 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22878 rtx cond, rtx cc_op0, rtx cc_op1)
22880 machine_mode dest_mode = GET_MODE (dest);
22881 machine_mode mask_mode = GET_MODE (cc_op0);
22882 enum rtx_code rcode = GET_CODE (cond);
22883 machine_mode cc_mode = CCmode;
22884 rtx mask;
22885 rtx cond2;
22886 bool invert_move = false;
22888 if (VECTOR_UNIT_NONE_P (dest_mode))
22889 return 0;
22891 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22892 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22894 switch (rcode)
22896 /* Swap operands if we can, and fall back to doing the operation as
22897 specified, and doing a NOR to invert the test. */
22898 case NE:
22899 case UNLE:
22900 case UNLT:
22901 case UNGE:
22902 case UNGT:
22903 /* Invert condition and try again.
22904 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22905 invert_move = true;
22906 rcode = reverse_condition_maybe_unordered (rcode);
22907 if (rcode == UNKNOWN)
22908 return 0;
22909 break;
22911 case GE:
22912 case LE:
22913 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22915 /* Invert condition to avoid compound test. */
22916 invert_move = true;
22917 rcode = reverse_condition (rcode);
22919 break;
22921 case GTU:
22922 case GEU:
22923 case LTU:
22924 case LEU:
22925 /* Mark unsigned tests with CCUNSmode. */
22926 cc_mode = CCUNSmode;
22928 /* Invert condition to avoid compound test if necessary. */
22929 if (rcode == GEU || rcode == LEU)
22931 invert_move = true;
22932 rcode = reverse_condition (rcode);
22934 break;
22936 default:
22937 break;
22940 /* Get the vector mask for the given relational operations. */
22941 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22943 if (!mask)
22944 return 0;
22946 if (invert_move)
22947 std::swap (op_true, op_false);
22949 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22950 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22951 && (GET_CODE (op_true) == CONST_VECTOR
22952 || GET_CODE (op_false) == CONST_VECTOR))
22954 rtx constant_0 = CONST0_RTX (dest_mode);
22955 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22957 if (op_true == constant_m1 && op_false == constant_0)
22959 emit_move_insn (dest, mask);
22960 return 1;
22963 else if (op_true == constant_0 && op_false == constant_m1)
22965 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22966 return 1;
22969 /* If we can't use the vector comparison directly, perhaps we can use
22970 the mask for the true or false fields, instead of loading up a
22971 constant. */
22972 if (op_true == constant_m1)
22973 op_true = mask;
22975 if (op_false == constant_0)
22976 op_false = mask;
22979 if (!REG_P (op_true) && !SUBREG_P (op_true))
22980 op_true = force_reg (dest_mode, op_true);
22982 if (!REG_P (op_false) && !SUBREG_P (op_false))
22983 op_false = force_reg (dest_mode, op_false);
22985 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22986 CONST0_RTX (dest_mode));
22987 emit_insn (gen_rtx_SET (dest,
22988 gen_rtx_IF_THEN_ELSE (dest_mode,
22989 cond2,
22990 op_true,
22991 op_false)));
22992 return 1;
22995 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22996 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22997 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22998 hardware has no such operation. */
23000 static int
23001 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23003 enum rtx_code code = GET_CODE (op);
23004 rtx op0 = XEXP (op, 0);
23005 rtx op1 = XEXP (op, 1);
23006 machine_mode compare_mode = GET_MODE (op0);
23007 machine_mode result_mode = GET_MODE (dest);
23008 bool max_p = false;
23010 if (result_mode != compare_mode)
23011 return 0;
23013 if (code == GE || code == GT)
23014 max_p = true;
23015 else if (code == LE || code == LT)
23016 max_p = false;
23017 else
23018 return 0;
23020 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
23021 ;
23023 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
23024 max_p = !max_p;
23026 else
23027 return 0;
23029 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
23030 return 1;
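/* E.g. (a >= b) ? a : b maps to SMAX (xsmaxcdp) and (a >= b) ? b : a to
   SMIN (xsmincdp); the operand-swap check above catches the second,
   inverted form.  */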
23033 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
23034 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
23035 operands of the last comparison is nonzero/true, FALSE_COND if it is
23036 zero/false. Return 0 if the hardware has no such operation. */
23038 static int
23039 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23041 enum rtx_code code = GET_CODE (op);
23042 rtx op0 = XEXP (op, 0);
23043 rtx op1 = XEXP (op, 1);
23044 machine_mode result_mode = GET_MODE (dest);
23045 rtx compare_rtx;
23046 rtx cmove_rtx;
23047 rtx clobber_rtx;
23049 if (!can_create_pseudo_p ())
23050 return 0;
23052 switch (code)
23054 case EQ:
23055 case GE:
23056 case GT:
23057 break;
23059 case NE:
23060 case LT:
23061 case LE:
23062 code = swap_condition (code);
23063 std::swap (op0, op1);
23064 break;
23066 default:
23067 return 0;
23070 /* Generate: [(parallel [(set (dest)
23071 (if_then_else (op (cmp1) (cmp2))
23072 (true)
23073 (false)))
23074 (clobber (scratch))])]. */
23076 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23077 cmove_rtx = gen_rtx_SET (dest,
23078 gen_rtx_IF_THEN_ELSE (result_mode,
23079 compare_rtx,
23080 true_cond,
23081 false_cond));
23083 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23084 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23085 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23087 return 1;
23090 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
23091 operands of the last comparison is nonzero/true, FALSE_COND if it
23092 is zero/false. Return 0 if the hardware has no such operation. */
23095 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23097 enum rtx_code code = GET_CODE (op);
23098 rtx op0 = XEXP (op, 0);
23099 rtx op1 = XEXP (op, 1);
23100 machine_mode compare_mode = GET_MODE (op0);
23101 machine_mode result_mode = GET_MODE (dest);
23102 rtx temp;
23103 bool is_against_zero;
23105 /* These modes should always match. */
23106 if (GET_MODE (op1) != compare_mode
23107 /* In the isel case however, we can use a compare immediate, so
23108 op1 may be a small constant. */
23109 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23110 return 0;
23111 if (GET_MODE (true_cond) != result_mode)
23112 return 0;
23113 if (GET_MODE (false_cond) != result_mode)
23114 return 0;
23116 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23117 if (TARGET_P9_MINMAX
23118 && (compare_mode == SFmode || compare_mode == DFmode)
23119 && (result_mode == SFmode || result_mode == DFmode))
23121 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23122 return 1;
23124 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23125 return 1;
23128 /* Don't allow using floating point comparisons for integer results for
23129 now. */
23130 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23131 return 0;
23133 /* First, work out if the hardware can do this at all, or
23134 if it's too slow.... */
23135 if (!FLOAT_MODE_P (compare_mode))
23137 if (TARGET_ISEL)
23138 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23139 return 0;
23142 is_against_zero = op1 == CONST0_RTX (compare_mode);
23144 /* A floating-point subtract might overflow, underflow, or produce
23145 an inexact result, thus changing the floating-point flags, so it
23146 can't be generated if we care about that. It's safe if one side
23147 of the construct is zero, since then no subtract will be
23148 generated. */
23149 if (SCALAR_FLOAT_MODE_P (compare_mode)
23150 && flag_trapping_math && ! is_against_zero)
23151 return 0;
23153 /* Eliminate half of the comparisons by switching operands; this
23154 makes the remaining code simpler. */
23155 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23156 || code == LTGT || code == LT || code == UNLE)
23158 code = reverse_condition_maybe_unordered (code);
23159 temp = true_cond;
23160 true_cond = false_cond;
23161 false_cond = temp;
23164 /* UNEQ and LTGT take four instructions for a comparison with zero;
23165 it'll probably be faster to use a branch here too. */
23166 if (code == UNEQ && HONOR_NANS (compare_mode))
23167 return 0;
23169 /* We're going to try to implement comparisons by performing
23170 a subtract, then comparing against zero. Unfortunately,
23171 Inf - Inf is NaN which is not zero, and so if we don't
23172 know that the operand is finite and the comparison
23173 would treat EQ differently from UNORDERED, we can't do it.  */
23174 if (HONOR_INFINITIES (compare_mode)
23175 && code != GT && code != UNGE
23176 && (GET_CODE (op1) != CONST_DOUBLE
23177 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23178 /* Constructs of the form (a OP b ? a : b) are safe. */
23179 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23180 || (! rtx_equal_p (op0, true_cond)
23181 && ! rtx_equal_p (op1, true_cond))))
23182 return 0;
23184 /* At this point we know we can use fsel. */
23186 /* Reduce the comparison to a comparison against zero. */
23187 if (! is_against_zero)
23189 temp = gen_reg_rtx (compare_mode);
23190 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23191 op0 = temp;
23192 op1 = CONST0_RTX (compare_mode);
23195 /* If we don't care about NaNs we can reduce some of the comparisons
23196 down to faster ones. */
23197 if (! HONOR_NANS (compare_mode))
23198 switch (code)
23200 case GT:
23201 code = LE;
23202 temp = true_cond;
23203 true_cond = false_cond;
23204 false_cond = temp;
23205 break;
23206 case UNGE:
23207 code = GE;
23208 break;
23209 case UNEQ:
23210 code = EQ;
23211 break;
23212 default:
23213 break;
23216 /* Now, reduce everything down to a GE. */
23217 switch (code)
23219 case GE:
23220 break;
23222 case LE:
23223 temp = gen_reg_rtx (compare_mode);
23224 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23225 op0 = temp;
23226 break;
23228 case ORDERED:
23229 temp = gen_reg_rtx (compare_mode);
23230 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23231 op0 = temp;
23232 break;
23234 case EQ:
23235 temp = gen_reg_rtx (compare_mode);
23236 emit_insn (gen_rtx_SET (temp,
23237 gen_rtx_NEG (compare_mode,
23238 gen_rtx_ABS (compare_mode, op0))));
23239 op0 = temp;
23240 break;
23242 case UNGE:
23243 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23244 temp = gen_reg_rtx (result_mode);
23245 emit_insn (gen_rtx_SET (temp,
23246 gen_rtx_IF_THEN_ELSE (result_mode,
23247 gen_rtx_GE (VOIDmode,
23248 op0, op1),
23249 true_cond, false_cond)));
23250 false_cond = true_cond;
23251 true_cond = temp;
23253 temp = gen_reg_rtx (compare_mode);
23254 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23255 op0 = temp;
23256 break;
23258 case GT:
23259 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23260 temp = gen_reg_rtx (result_mode);
23261 emit_insn (gen_rtx_SET (temp,
23262 gen_rtx_IF_THEN_ELSE (result_mode,
23263 gen_rtx_GE (VOIDmode,
23264 op0, op1),
23265 true_cond, false_cond)));
23266 true_cond = false_cond;
23267 false_cond = temp;
23269 temp = gen_reg_rtx (compare_mode);
23270 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23271 op0 = temp;
23272 break;
23274 default:
23275 gcc_unreachable ();
23278 emit_insn (gen_rtx_SET (dest,
23279 gen_rtx_IF_THEN_ELSE (result_mode,
23280 gen_rtx_GE (VOIDmode,
23281 op0, op1),
23282 true_cond, false_cond)));
23283 return 1;
23286 /* Same as above, but for ints (isel). */
23289 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23291 rtx condition_rtx, cr;
23292 machine_mode mode = GET_MODE (dest);
23293 enum rtx_code cond_code;
23294 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23295 bool signedp;
23297 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23298 return 0;
23300 /* We still have to do the compare, because isel doesn't do a
23301 compare, it just looks at the CRx bits set by a previous compare
23302 instruction. */
23303 condition_rtx = rs6000_generate_compare (op, mode);
23304 cond_code = GET_CODE (condition_rtx);
23305 cr = XEXP (condition_rtx, 0);
23306 signedp = GET_MODE (cr) == CCmode;
23308 isel_func = (mode == SImode
23309 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23310 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23312 switch (cond_code)
23314 case LT: case GT: case LTU: case GTU: case EQ:
23315 /* isel handles these directly. */
23316 break;
23318 default:
23319 /* We need to swap the sense of the comparison. */
23321 std::swap (false_cond, true_cond);
23322 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23324 break;
23327 false_cond = force_reg (mode, false_cond);
23328 if (true_cond != const0_rtx)
23329 true_cond = force_reg (mode, true_cond);
23331 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23333 return 1;
23336 void
23337 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23339 machine_mode mode = GET_MODE (op0);
23340 enum rtx_code c;
23341 rtx target;
23343 /* VSX/altivec have direct min/max insns. */
23344 if ((code == SMAX || code == SMIN)
23345 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23346 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23348 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23349 return;
23352 if (code == SMAX || code == SMIN)
23353 c = GE;
23354 else
23355 c = GEU;
23357 if (code == SMAX || code == UMAX)
23358 target = emit_conditional_move (dest, c, op0, op1, mode,
23359 op0, op1, mode, 0);
23360 else
23361 target = emit_conditional_move (dest, c, op0, op1, mode,
23362 op1, op0, mode, 0);
23363 gcc_assert (target);
23364 if (target != dest)
23365 emit_move_insn (dest, target);
23368 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23369 COND is true. Mark the jump as unlikely to be taken. */
23371 static void
23372 emit_unlikely_jump (rtx cond, rtx label)
23374 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23375 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23376 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23379 /* A subroutine of the atomic operation splitters. Emit a load-locked
23380 instruction in MODE.  For QI/HImode, possibly use a pattern that includes
23381 the zero_extend operation. */
23383 static void
23384 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23386 rtx (*fn) (rtx, rtx) = NULL;
23388 switch (mode)
23390 case E_QImode:
23391 fn = gen_load_lockedqi;
23392 break;
23393 case E_HImode:
23394 fn = gen_load_lockedhi;
23395 break;
23396 case E_SImode:
23397 if (GET_MODE (mem) == QImode)
23398 fn = gen_load_lockedqi_si;
23399 else if (GET_MODE (mem) == HImode)
23400 fn = gen_load_lockedhi_si;
23401 else
23402 fn = gen_load_lockedsi;
23403 break;
23404 case E_DImode:
23405 fn = gen_load_lockeddi;
23406 break;
23407 case E_TImode:
23408 fn = gen_load_lockedti;
23409 break;
23410 default:
23411 gcc_unreachable ();
23413 emit_insn (fn (reg, mem));
23416 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23417 instruction in MODE. */
23419 static void
23420 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23422 rtx (*fn) (rtx, rtx, rtx) = NULL;
23424 switch (mode)
23426 case E_QImode:
23427 fn = gen_store_conditionalqi;
23428 break;
23429 case E_HImode:
23430 fn = gen_store_conditionalhi;
23431 break;
23432 case E_SImode:
23433 fn = gen_store_conditionalsi;
23434 break;
23435 case E_DImode:
23436 fn = gen_store_conditionaldi;
23437 break;
23438 case E_TImode:
23439 fn = gen_store_conditionalti;
23440 break;
23441 default:
23442 gcc_unreachable ();
23445 /* Emit sync before stwcx. to address PPC405 Erratum 77.  */
23446 if (PPC405_ERRATUM77)
23447 emit_insn (gen_hwsync ());
23449 emit_insn (fn (res, mem, val));
23452 /* Expand barriers before and after a load_locked/store_cond sequence. */
23454 static rtx
23455 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23457 rtx addr = XEXP (mem, 0);
23459 if (!legitimate_indirect_address_p (addr, reload_completed)
23460 && !legitimate_indexed_address_p (addr, reload_completed))
23462 addr = force_reg (Pmode, addr);
23463 mem = replace_equiv_address_nv (mem, addr);
23466 switch (model)
23468 case MEMMODEL_RELAXED:
23469 case MEMMODEL_CONSUME:
23470 case MEMMODEL_ACQUIRE:
23471 break;
23472 case MEMMODEL_RELEASE:
23473 case MEMMODEL_ACQ_REL:
23474 emit_insn (gen_lwsync ());
23475 break;
23476 case MEMMODEL_SEQ_CST:
23477 emit_insn (gen_hwsync ());
23478 break;
23479 default:
23480 gcc_unreachable ();
23482 return mem;
23485 static void
23486 rs6000_post_atomic_barrier (enum memmodel model)
23488 switch (model)
23490 case MEMMODEL_RELAXED:
23491 case MEMMODEL_CONSUME:
23492 case MEMMODEL_RELEASE:
23493 break;
23494 case MEMMODEL_ACQUIRE:
23495 case MEMMODEL_ACQ_REL:
23496 case MEMMODEL_SEQ_CST:
23497 emit_insn (gen_isync ());
23498 break;
23499 default:
23500 gcc_unreachable ();
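/* Summary of the two functions above, per memory model
   (pre-barrier / post-barrier):

     relaxed, consume:  none   / none
     acquire:           none   / isync
     release:           lwsync / none
     acq_rel:           lwsync / isync
     seq_cst:           sync   / isync  */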
23504 /* A subroutine of the various atomic expanders. For sub-word operations,
23505 we must adjust things to operate on SImode. Given the original MEM,
23506 return a new aligned memory. Also build and return the quantities by
23507 which to shift and mask. */
23509 static rtx
23510 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23512 rtx addr, align, shift, mask, mem;
23513 HOST_WIDE_INT shift_mask;
23514 machine_mode mode = GET_MODE (orig_mem);
23516 /* For smaller modes, we have to implement this via SImode. */
23517 shift_mask = (mode == QImode ? 0x18 : 0x10);
23519 addr = XEXP (orig_mem, 0);
23520 addr = force_reg (GET_MODE (addr), addr);
23522 /* Aligned memory containing the subword.  Generate a new MEM.  We
23523 do not want any of the existing MEM_ATTR data, as we're now
23524 accessing memory outside the original object. */
23525 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23526 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23527 mem = gen_rtx_MEM (SImode, align);
23528 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23529 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23530 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23532 /* Shift amount for subword relative to aligned word. */
23533 shift = gen_reg_rtx (SImode);
23534 addr = gen_lowpart (SImode, addr);
23535 rtx tmp = gen_reg_rtx (SImode);
23536 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23537 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23538 if (BYTES_BIG_ENDIAN)
23539 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23540 shift, 1, OPTAB_LIB_WIDEN);
23541 *pshift = shift;
23543 /* Mask for insertion. */
23544 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23545 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23546 *pmask = mask;
23548 return mem;
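/* Worked example, assuming a QImode access at address A on a
   little-endian target:

     mem   = the SImode word at (A & -4)
     shift = (A << 3) & 0x18     bit offset of the byte in that word
     mask  = 0xff << shift

   On big-endian targets the shift is additionally XORed with 0x18 so
   that the lowest address maps to the most-significant byte.  */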
23551 /* A subroutine of the various atomic expanders. For sub-word operands,
23552 combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */
23554 static rtx
23555 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23557 rtx x;
23559 x = gen_reg_rtx (SImode);
23560 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23561 gen_rtx_NOT (SImode, mask),
23562 oldval)));
23564 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23566 return x;
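/* That is, the result is (oldval & ~mask) | newval, where NEWVAL has
   already been shifted into position by the caller.  */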
23569 /* A subroutine of the various atomic expanders. For sub-word operands,
23570 extract WIDE to NARROW via SHIFT. */
23572 static void
23573 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23575 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23576 wide, 1, OPTAB_LIB_WIDEN);
23577 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23580 /* Expand an atomic compare and swap operation. */
23582 void
23583 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23585 rtx boolval, retval, mem, oldval, newval, cond;
23586 rtx label1, label2, x, mask, shift;
23587 machine_mode mode, orig_mode;
23588 enum memmodel mod_s, mod_f;
23589 bool is_weak;
23591 boolval = operands[0];
23592 retval = operands[1];
23593 mem = operands[2];
23594 oldval = operands[3];
23595 newval = operands[4];
23596 is_weak = (INTVAL (operands[5]) != 0);
23597 mod_s = memmodel_base (INTVAL (operands[6]));
23598 mod_f = memmodel_base (INTVAL (operands[7]));
23599 orig_mode = mode = GET_MODE (mem);
23601 mask = shift = NULL_RTX;
23602 if (mode == QImode || mode == HImode)
23604 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23605 lwarx plus shift/mask sequence.  With power8, we need to do the
23606 comparison in SImode, but the store is still done in QI/HImode. */
23607 oldval = convert_modes (SImode, mode, oldval, 1);
23609 if (!TARGET_SYNC_HI_QI)
23611 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23613 /* Shift and mask OLDVAL into position within the word.  */
23614 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23615 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23617 /* Shift and mask NEWVAL into position within the word. */
23618 newval = convert_modes (SImode, mode, newval, 1);
23619 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23620 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23623 /* Prepare to adjust the return value. */
23624 retval = gen_reg_rtx (SImode);
23625 mode = SImode;
23627 else if (reg_overlap_mentioned_p (retval, oldval))
23628 oldval = copy_to_reg (oldval);
23630 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23631 oldval = copy_to_mode_reg (mode, oldval);
23633 if (reg_overlap_mentioned_p (retval, newval))
23634 newval = copy_to_reg (newval);
23636 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23638 label1 = NULL_RTX;
23639 if (!is_weak)
23641 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23642 emit_label (XEXP (label1, 0));
23644 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23646 emit_load_locked (mode, retval, mem);
23648 x = retval;
23649 if (mask)
23650 x = expand_simple_binop (SImode, AND, retval, mask,
23651 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23653 cond = gen_reg_rtx (CCmode);
23654 /* If we have TImode, synthesize a comparison. */
23655 if (mode != TImode)
23656 x = gen_rtx_COMPARE (CCmode, x, oldval);
23657 else
23659 rtx xor1_result = gen_reg_rtx (DImode);
23660 rtx xor2_result = gen_reg_rtx (DImode);
23661 rtx or_result = gen_reg_rtx (DImode);
23662 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23663 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23664 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23665 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23667 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23668 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23669 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23670 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23673 emit_insn (gen_rtx_SET (cond, x));
23675 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23676 emit_unlikely_jump (x, label2);
23678 x = newval;
23679 if (mask)
23680 x = rs6000_mask_atomic_subword (retval, newval, mask);
23682 emit_store_conditional (orig_mode, cond, mem, x);
23684 if (!is_weak)
23686 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23687 emit_unlikely_jump (x, label1);
23690 if (!is_mm_relaxed (mod_f))
23691 emit_label (XEXP (label2, 0));
23693 rs6000_post_atomic_barrier (mod_s);
23695 if (is_mm_relaxed (mod_f))
23696 emit_label (XEXP (label2, 0));
23698 if (shift)
23699 rs6000_finish_atomic_subword (operands[1], retval, shift);
23700 else if (mode != GET_MODE (operands[1]))
23701 convert_move (operands[1], retval, 1);
23703 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23704 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23705 emit_insn (gen_rtx_SET (boolval, x));
23708 /* Expand an atomic exchange operation. */
23710 void
23711 rs6000_expand_atomic_exchange (rtx operands[])
23713 rtx retval, mem, val, cond;
23714 machine_mode mode;
23715 enum memmodel model;
23716 rtx label, x, mask, shift;
23718 retval = operands[0];
23719 mem = operands[1];
23720 val = operands[2];
23721 model = memmodel_base (INTVAL (operands[3]));
23722 mode = GET_MODE (mem);
23724 mask = shift = NULL_RTX;
23725 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23727 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23729 /* Shift and mask VAL into position within the word.  */
23730 val = convert_modes (SImode, mode, val, 1);
23731 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23732 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23734 /* Prepare to adjust the return value. */
23735 retval = gen_reg_rtx (SImode);
23736 mode = SImode;
23739 mem = rs6000_pre_atomic_barrier (mem, model);
23741 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23742 emit_label (XEXP (label, 0));
23744 emit_load_locked (mode, retval, mem);
23746 x = val;
23747 if (mask)
23748 x = rs6000_mask_atomic_subword (retval, val, mask);
23750 cond = gen_reg_rtx (CCmode);
23751 emit_store_conditional (mode, cond, mem, x);
23753 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23754 emit_unlikely_jump (x, label);
23756 rs6000_post_atomic_barrier (model);
23758 if (shift)
23759 rs6000_finish_atomic_subword (operands[0], retval, shift);
23762 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23763 to perform. MEM is the memory on which to operate. VAL is the second
23764 operand of the binary operator. BEFORE and AFTER are optional locations to
23765 return the value of MEM either before or after the operation.  MODEL_RTX
23766 is a CONST_INT containing the memory model to use. */
23768 void
23769 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23770 rtx orig_before, rtx orig_after, rtx model_rtx)
23772 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23773 machine_mode mode = GET_MODE (mem);
23774 machine_mode store_mode = mode;
23775 rtx label, x, cond, mask, shift;
23776 rtx before = orig_before, after = orig_after;
23778 mask = shift = NULL_RTX;
23779 /* On power8, we want to do the operation in SImode.  On previous systems,
23780 do the operation on the containing aligned word and use shift/mask to get
23781 at the proper byte or halfword.  */
23782 if (mode == QImode || mode == HImode)
23784 if (TARGET_SYNC_HI_QI)
23786 val = convert_modes (SImode, mode, val, 1);
23788 /* Prepare to adjust the return value. */
23789 before = gen_reg_rtx (SImode);
23790 if (after)
23791 after = gen_reg_rtx (SImode);
23792 mode = SImode;
23794 else
23796 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23798 /* Shift and mask VAL into position within the word.  */
23799 val = convert_modes (SImode, mode, val, 1);
23800 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23801 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23803 switch (code)
23805 case IOR:
23806 case XOR:
23807 /* We've already zero-extended VAL. That is sufficient to
23808 make certain that it does not affect other bits. */
23809 mask = NULL;
23810 break;
23812 case AND:
23813 /* If we make certain that all of the other bits in VAL are
23814 set, that will be sufficient to not affect other bits. */
23815 x = gen_rtx_NOT (SImode, mask);
23816 x = gen_rtx_IOR (SImode, x, val);
23817 emit_insn (gen_rtx_SET (val, x));
23818 mask = NULL;
23819 break;
23821 case NOT:
23822 case PLUS:
23823 case MINUS:
23824 /* These will all affect bits outside the field and need
23825 adjustment via MASK within the loop. */
23826 break;
23828 default:
23829 gcc_unreachable ();
23832 /* Prepare to adjust the return value. */
23833 before = gen_reg_rtx (SImode);
23834 if (after)
23835 after = gen_reg_rtx (SImode);
23836 store_mode = mode = SImode;
23840 mem = rs6000_pre_atomic_barrier (mem, model);
23842 label = gen_label_rtx ();
23843 emit_label (label);
23844 label = gen_rtx_LABEL_REF (VOIDmode, label);
23846 if (before == NULL_RTX)
23847 before = gen_reg_rtx (mode);
23849 emit_load_locked (mode, before, mem);
23851 if (code == NOT)
23853 x = expand_simple_binop (mode, AND, before, val,
23854 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23855 after = expand_simple_unop (mode, NOT, x, after, 1);
23857 else
23859 after = expand_simple_binop (mode, code, before, val,
23860 after, 1, OPTAB_LIB_WIDEN);
23863 x = after;
23864 if (mask)
23866 x = expand_simple_binop (SImode, AND, after, mask,
23867 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23868 x = rs6000_mask_atomic_subword (before, x, mask);
23870 else if (store_mode != mode)
23871 x = convert_modes (store_mode, mode, x, 1);
23873 cond = gen_reg_rtx (CCmode);
23874 emit_store_conditional (store_mode, cond, mem, x);
23876 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23877 emit_unlikely_jump (x, label);
23879 rs6000_post_atomic_barrier (model);
23881 if (shift)
23883 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23884 then do the calculations in a SImode register.  */
23885 if (orig_before)
23886 rs6000_finish_atomic_subword (orig_before, before, shift);
23887 if (orig_after)
23888 rs6000_finish_atomic_subword (orig_after, after, shift);
23890 else if (store_mode != mode)
23892 /* QImode/HImode on machines with lbarx/lharx where we do the native
23893 operation and then do the calculations in a SImode register.  */
23894 if (orig_before)
23895 convert_move (orig_before, before, 1);
23896 if (orig_after)
23897 convert_move (orig_after, after, 1);
23899 else if (orig_after && after != orig_after)
23900 emit_move_insn (orig_after, after);
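/* For instance, an SImode __atomic_fetch_add with relaxed ordering
   becomes roughly (illustrative only):

     1:  lwarx   before,0,mem
         add     after,before,val
         stwcx.  after,0,mem
         bne-    cr0,1b  */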
23903 /* Emit instructions to move SRC to DST. Called by splitters for
23904 multi-register moves. It will emit at most one instruction for
23905 each register that is accessed; that is, it won't emit li/lis pairs
23906 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23907 register. */
23909 void
23910 rs6000_split_multireg_move (rtx dst, rtx src)
23912 /* The register number of the first register being moved. */
23913 int reg;
23914 /* The mode that is to be moved. */
23915 machine_mode mode;
23916 /* The mode that the move is being done in, and its size. */
23917 machine_mode reg_mode;
23918 int reg_mode_size;
23919 /* The number of registers that will be moved. */
23920 int nregs;
23922 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23923 mode = GET_MODE (dst);
23924 nregs = hard_regno_nregs (reg, mode);
23925 if (FP_REGNO_P (reg))
23926 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23927 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23928 else if (ALTIVEC_REGNO_P (reg))
23929 reg_mode = V16QImode;
23930 else
23931 reg_mode = word_mode;
23932 reg_mode_size = GET_MODE_SIZE (reg_mode);
23934 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23936 /* TDmode residing in FP registers is special, since the ISA requires that
23937 the lower-numbered word of a register pair is always the most significant
23938 word, even in little-endian mode. This does not match the usual subreg
23939 semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23940 the appropriate constituent registers "by hand" in little-endian mode.
23942 Note we do not need to check for destructive overlap here since TDmode
23943 can only reside in even/odd register pairs. */
23944 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23946 rtx p_src, p_dst;
23947 int i;
23949 for (i = 0; i < nregs; i++)
23951 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23952 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23953 else
23954 p_src = simplify_gen_subreg (reg_mode, src, mode,
23955 i * reg_mode_size);
23957 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23958 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23959 else
23960 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23961 i * reg_mode_size);
23963 emit_insn (gen_rtx_SET (p_dst, p_src));
23966 return;
23969 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23971 /* Move register range backwards, if we might have destructive
23972 overlap. */
23973 int i;
23974 for (i = nregs - 1; i >= 0; i--)
23975 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23976 i * reg_mode_size),
23977 simplify_gen_subreg (reg_mode, src, mode,
23978 i * reg_mode_size)));
23980 else
23982 int i;
23983 int j = -1;
23984 bool used_update = false;
23985 rtx restore_basereg = NULL_RTX;
23987 if (MEM_P (src) && INT_REGNO_P (reg))
23989 rtx breg;
23991 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23992 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23994 rtx delta_rtx;
23995 breg = XEXP (XEXP (src, 0), 0);
23996 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23997 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23998 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23999 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24000 src = replace_equiv_address (src, breg);
24002 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
24004 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
24006 rtx basereg = XEXP (XEXP (src, 0), 0);
24007 if (TARGET_UPDATE)
24009 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
24010 emit_insn (gen_rtx_SET (ndst,
24011 gen_rtx_MEM (reg_mode,
24012 XEXP (src, 0))));
24013 used_update = true;
24015 else
24016 emit_insn (gen_rtx_SET (basereg,
24017 XEXP (XEXP (src, 0), 1)));
24018 src = replace_equiv_address (src, basereg);
24020 else
24022 rtx basereg = gen_rtx_REG (Pmode, reg);
24023 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
24024 src = replace_equiv_address (src, basereg);
24028 breg = XEXP (src, 0);
24029 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
24030 breg = XEXP (breg, 0);
24032 /* If the base register we are using to address memory is
24033 also a destination reg, then change that register last. */
24034 if (REG_P (breg)
24035 && REGNO (breg) >= REGNO (dst)
24036 && REGNO (breg) < REGNO (dst) + nregs)
24037 j = REGNO (breg) - REGNO (dst);
24039 else if (MEM_P (dst) && INT_REGNO_P (reg))
24041 rtx breg;
24043 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24044 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24046 rtx delta_rtx;
24047 breg = XEXP (XEXP (dst, 0), 0);
24048 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24049 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24050 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24052 /* We have to update the breg before doing the store.
24053 Use store with update, if available. */
24055 if (TARGET_UPDATE)
24057 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24058 emit_insn (TARGET_32BIT
24059 ? (TARGET_POWERPC64
24060 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24061 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24062 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24063 used_update = true;
24065 else
24066 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24067 dst = replace_equiv_address (dst, breg);
24069 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
24070 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24072 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24074 rtx basereg = XEXP (XEXP (dst, 0), 0);
24075 if (TARGET_UPDATE)
24077 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24078 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24079 XEXP (dst, 0)),
24080 nsrc));
24081 used_update = true;
24083 else
24084 emit_insn (gen_rtx_SET (basereg,
24085 XEXP (XEXP (dst, 0), 1)));
24086 dst = replace_equiv_address (dst, basereg);
24088 else
24090 rtx basereg = XEXP (XEXP (dst, 0), 0);
24091 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24092 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24093 && REG_P (basereg)
24094 && REG_P (offsetreg)
24095 && REGNO (basereg) != REGNO (offsetreg));
24096 if (REGNO (basereg) == 0)
24098 rtx tmp = offsetreg;
24099 offsetreg = basereg;
24100 basereg = tmp;
24102 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24103 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24104 dst = replace_equiv_address (dst, basereg);
24107 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24108 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
24111 for (i = 0; i < nregs; i++)
24113 /* Calculate index to next subword. */
24114 ++j;
24115 if (j == nregs)
24116 j = 0;
24118 /* If compiler already emitted move of first word by
24119 store with update, no need to do anything. */
24120 if (j == 0 && used_update)
24121 continue;
24123 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24124 j * reg_mode_size),
24125 simplify_gen_subreg (reg_mode, src, mode,
24126 j * reg_mode_size)));
24128 if (restore_basereg != NULL_RTX)
24129 emit_insn (restore_basereg);
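/* Overlap example: a TImode move from r10:r11 to r11:r12 on a 64-bit
   target is split backwards, r12 = r11 then r11 = r10, so that the
   overlapping register is read before it is overwritten.  */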
24134 /* This page contains routines that are used to determine what the
24135 function prologue and epilogue code will do and write them out. */
24137 /* Determine whether register REG really needs to be saved.  */
24139 static bool
24140 save_reg_p (int reg)
24142 /* We need to mark the PIC offset register live for the same conditions
24143 as it is set up, or otherwise it won't be saved before we clobber it. */
24145 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24147 /* When calling eh_return, we must return true for all the cases
24148 where conditional_register_usage marks the PIC offset reg
24149 call used. */
24150 if (TARGET_TOC && TARGET_MINIMAL_TOC
24151 && (crtl->calls_eh_return
24152 || df_regs_ever_live_p (reg)
24153 || !constant_pool_empty_p ()))
24154 return true;
24156 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24157 && flag_pic)
24158 return true;
24161 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24164 /* Return the first fixed-point register that is required to be
24165 saved. 32 if none. */
24168 first_reg_to_save (void)
24170 int first_reg;
24172 /* Find lowest numbered live register. */
24173 for (first_reg = 13; first_reg <= 31; first_reg++)
24174 if (save_reg_p (first_reg))
24175 break;
24177 #if TARGET_MACHO
24178 if (flag_pic
24179 && crtl->uses_pic_offset_table
24180 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24181 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24182 #endif
24184 return first_reg;
24187 /* Similar, for FP regs. */
24190 first_fp_reg_to_save (void)
24192 int first_reg;
24194 /* Find lowest numbered live register. */
24195 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24196 if (save_reg_p (first_reg))
24197 break;
24199 return first_reg;
24202 /* Similar, for AltiVec regs. */
24204 static int
24205 first_altivec_reg_to_save (void)
24207 int i;
24209 /* The stack frame remains as-is unless we are using the AltiVec ABI.  */
24210 if (! TARGET_ALTIVEC_ABI)
24211 return LAST_ALTIVEC_REGNO + 1;
24213 /* On Darwin, the unwind routines are compiled without
24214 TARGET_ALTIVEC, and use save_world to save/restore the
24215 altivec registers when necessary. */
24216 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24217 && ! TARGET_ALTIVEC)
24218 return FIRST_ALTIVEC_REGNO + 20;
24220 /* Find lowest numbered live register. */
24221 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24222 if (save_reg_p (i))
24223 break;
24225 return i;
24228 /* Return a 32-bit mask of the AltiVec registers we need to set in
24229 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24230 the 32-bit word is 0. */
24232 static unsigned int
24233 compute_vrsave_mask (void)
24235 unsigned int i, mask = 0;
24237 /* On Darwin, the unwind routines are compiled without
24238 TARGET_ALTIVEC, and use save_world to save/restore the
24239 call-saved altivec registers when necessary. */
24240 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24241 && ! TARGET_ALTIVEC)
24242 mask |= 0xFFF;
24244 /* First, find out if we use _any_ altivec registers. */
24245 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24246 if (df_regs_ever_live_p (i))
24247 mask |= ALTIVEC_REG_BIT (i);
24249 if (mask == 0)
24250 return mask;
24252 /* Next, remove the argument registers from the set. These must
24253 be in the VRSAVE mask set by the caller, so we don't need to add
24254 them in again. More importantly, the mask we compute here is
24255 used to generate CLOBBERs in the set_vrsave insn, and we do not
24256 wish the argument registers to die. */
24257 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24258 mask &= ~ALTIVEC_REG_BIT (i);
24260 /* Similarly, remove the return value from the set. */
24262 bool yes = false;
24263 diddle_return_value (is_altivec_return_reg, &yes);
24264 if (yes)
24265 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24268 return mask;
24271 /* For a very restricted set of circumstances, we can cut down the
24272 size of prologues/epilogues by calling our own save/restore-the-world
24273 routines. */
24275 static void
24276 compute_save_world_info (rs6000_stack_t *info)
24278 info->world_save_p = 1;
24279 info->world_save_p
24280 = (WORLD_SAVE_P (info)
24281 && DEFAULT_ABI == ABI_DARWIN
24282 && !cfun->has_nonlocal_label
24283 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24284 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24285 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24286 && info->cr_save_p);
24288 /* This will not work in conjunction with sibcalls. Make sure there
24289 are none. (This check is expensive, but seldom executed.) */
24290 if (WORLD_SAVE_P (info))
24292 rtx_insn *insn;
24293 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24294 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24296 info->world_save_p = 0;
24297 break;
24301 if (WORLD_SAVE_P (info))
24303 /* Even if we're not touching VRsave, make sure there's room on the
24304 stack for it, if it looks like we're calling SAVE_WORLD, which
24305 will attempt to save it. */
24306 info->vrsave_size = 4;
24308 /* If we are going to save the world, we need to save the link register too. */
24309 info->lr_save_p = 1;
24311 /* "Save" the VRsave register too if we're saving the world. */
24312 if (info->vrsave_mask == 0)
24313 info->vrsave_mask = compute_vrsave_mask ();
24315 /* Because the Darwin register save/restore routines only handle
24316 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24317 check. */
24318 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24319 && (info->first_altivec_reg_save
24320 >= FIRST_SAVED_ALTIVEC_REGNO));
24323 return;
24327 static void
24328 is_altivec_return_reg (rtx reg, void *xyes)
24330 bool *yes = (bool *) xyes;
24331 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24332 *yes = true;
24336 /* Return whether REG is a global user reg or has been specified by
24337 -ffixed-REG. We should not restore these, and so cannot use
24338 lmw or out-of-line restore functions if there are any. We also
24339 can't save them (well, emit frame notes for them), because frame
24340 unwinding during exception handling will restore saved registers. */
24342 static bool
24343 fixed_reg_p (int reg)
24345 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24346 backend sets it, overriding anything the user might have given. */
24347 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24348 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24349 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24350 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24351 return false;
24353 return fixed_regs[reg];
24356 /* Determine the strategy for savings/restoring registers. */
24358 enum {
24359 SAVE_MULTIPLE = 0x1,
24360 SAVE_INLINE_GPRS = 0x2,
24361 SAVE_INLINE_FPRS = 0x4,
24362 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24363 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24364 SAVE_INLINE_VRS = 0x20,
24365 REST_MULTIPLE = 0x100,
24366 REST_INLINE_GPRS = 0x200,
24367 REST_INLINE_FPRS = 0x400,
24368 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24369 REST_INLINE_VRS = 0x1000
24372 static int
24373 rs6000_savres_strategy (rs6000_stack_t *info,
24374 bool using_static_chain_p)
24376 int strategy = 0;
24378 /* Select between in-line and out-of-line save and restore of regs.
24379 First, all the obvious cases where we don't use out-of-line. */
24380 if (crtl->calls_eh_return
24381 || cfun->machine->ra_need_lr)
24382 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24383 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24384 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24386 if (info->first_gp_reg_save == 32)
24387 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24389 if (info->first_fp_reg_save == 64
24390 /* The out-of-line FP routines use double-precision stores;
24391 we can't use those routines if we don't have such stores. */
24392 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24393 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24395 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24396 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24398 /* Define cutoff for using out-of-line functions to save registers. */
24399 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24401 if (!optimize_size)
24403 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24404 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24405 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24407 else
24409 /* Prefer out-of-line restore if it will exit. */
24410 if (info->first_fp_reg_save > 61)
24411 strategy |= SAVE_INLINE_FPRS;
24412 if (info->first_gp_reg_save > 29)
24414 if (info->first_fp_reg_save == 64)
24415 strategy |= SAVE_INLINE_GPRS;
24416 else
24417 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24419 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24420 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24423 else if (DEFAULT_ABI == ABI_DARWIN)
24425 if (info->first_fp_reg_save > 60)
24426 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24427 if (info->first_gp_reg_save > 29)
24428 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24429 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24431 else
24433 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24434 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24435 || info->first_fp_reg_save > 61)
24436 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24437 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24438 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24441 /* Don't bother to try to save things out-of-line if r11 is occupied
24442 by the static chain. It would require too much fiddling and the
24443 static chain is rarely used anyway.  FPRs are saved w.r.t. the stack
24444 pointer on Darwin, and AIX uses r1 or r12. */
24445 if (using_static_chain_p
24446 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24447 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24448 | SAVE_INLINE_GPRS
24449 | SAVE_INLINE_VRS);
24451 /* Don't ever restore fixed regs. That means we can't use the
24452 out-of-line register restore functions if a fixed reg is in the
24453 range of regs restored. */
24454 if (!(strategy & REST_INLINE_FPRS))
24455 for (int i = info->first_fp_reg_save; i < 64; i++)
24456 if (fixed_regs[i])
24458 strategy |= REST_INLINE_FPRS;
24459 break;
24462 /* We can only use the out-of-line routines to restore fprs if we've
24463 saved all the registers from first_fp_reg_save in the prologue.
24464 Otherwise, we risk loading garbage. Of course, if we have saved
24465 out-of-line then we know we haven't skipped any fprs. */
24466 if ((strategy & SAVE_INLINE_FPRS)
24467 && !(strategy & REST_INLINE_FPRS))
24468 for (int i = info->first_fp_reg_save; i < 64; i++)
24469 if (!save_reg_p (i))
24471 strategy |= REST_INLINE_FPRS;
24472 break;
24475 /* Similarly, for altivec regs. */
24476 if (!(strategy & REST_INLINE_VRS))
24477 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24478 if (fixed_regs[i])
24480 strategy |= REST_INLINE_VRS;
24481 break;
24484 if ((strategy & SAVE_INLINE_VRS)
24485 && !(strategy & REST_INLINE_VRS))
24486 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24487 if (!save_reg_p (i))
24489 strategy |= REST_INLINE_VRS;
24490 break;
24493 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24494 saved is an out-of-line save or restore. Set up the value for
24495 the next test (excluding out-of-line gprs). */
24496 bool lr_save_p = (info->lr_save_p
24497 || !(strategy & SAVE_INLINE_FPRS)
24498 || !(strategy & SAVE_INLINE_VRS)
24499 || !(strategy & REST_INLINE_FPRS)
24500 || !(strategy & REST_INLINE_VRS));
24502 if (TARGET_MULTIPLE
24503 && !TARGET_POWERPC64
24504 && info->first_gp_reg_save < 31
24505 && !(flag_shrink_wrap
24506 && flag_shrink_wrap_separate
24507 && optimize_function_for_speed_p (cfun)))
24509 int count = 0;
24510 for (int i = info->first_gp_reg_save; i < 32; i++)
24511 if (save_reg_p (i))
24512 count++;
24514 if (count <= 1)
24515 /* Don't use store multiple if only one reg needs to be
24516 saved. This can occur for example when the ABI_V4 pic reg
24517 (r30) needs to be saved to make calls, but r31 is not
24518 used. */
24519 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24520 else
24522 /* Prefer store multiple for saves over out-of-line
24523 routines, since the store-multiple instruction will
24524 always be smaller. */
24525 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24527 /* The situation is more complicated with load multiple.
24528 We'd prefer to use the out-of-line routines for restores,
24529 since the "exit" out-of-line routines can handle the
24530 restore of LR and the frame teardown.  However it doesn't
24531 make sense to use the out-of-line routine if that is the
24532 only reason we'd need to save LR, and we can't use the
24533 "exit" out-of-line gpr restore if we have saved some
24534 fprs; in those cases it is advantageous to use load
24535 multiple when available. */
24536 if (info->first_fp_reg_save != 64 || !lr_save_p)
24537 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24541 /* Using the "exit" out-of-line routine does not improve code size
24542 if using it would require lr to be saved and if only saving one
24543 or two gprs. */
24544 else if (!lr_save_p && info->first_gp_reg_save > 29)
24545 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24547 /* Don't ever restore fixed regs. */
24548 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24549 for (int i = info->first_gp_reg_save; i < 32; i++)
24550 if (fixed_reg_p (i))
24552 strategy |= REST_INLINE_GPRS;
24553 strategy &= ~REST_MULTIPLE;
24554 break;
24557 /* We can only use load multiple or the out-of-line routines to
24558 restore gprs if we've saved all the registers from
24559 first_gp_reg_save. Otherwise, we risk loading garbage.
24560 Of course, if we have saved out-of-line or used stmw then we know
24561 we haven't skipped any gprs. */
24562 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24563 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24564 for (int i = info->first_gp_reg_save; i < 32; i++)
24565 if (!save_reg_p (i))
24567 strategy |= REST_INLINE_GPRS;
24568 strategy &= ~REST_MULTIPLE;
24569 break;
24572 if (TARGET_ELF && TARGET_64BIT)
24574 if (!(strategy & SAVE_INLINE_FPRS))
24575 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24576 else if (!(strategy & SAVE_INLINE_GPRS)
24577 && info->first_fp_reg_save == 64)
24578 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24580 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24581 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24583 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24584 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24586 return strategy;
24589 /* Calculate the stack information for the current function. This is
24590 complicated by having two separate calling sequences, the AIX calling
24591 sequence and the V.4 calling sequence.
24593 AIX (and Darwin/Mac OS X) stack frames look like:
24594 32-bit 64-bit
24595 SP----> +---------------------------------------+
24596 | back chain to caller | 0 0
24597 +---------------------------------------+
24598 | saved CR | 4 8 (8-11)
24599 +---------------------------------------+
24600 | saved LR | 8 16
24601 +---------------------------------------+
24602 | reserved for compilers | 12 24
24603 +---------------------------------------+
24604 | reserved for binders | 16 32
24605 +---------------------------------------+
24606 | saved TOC pointer | 20 40
24607 +---------------------------------------+
24608 | Parameter save area (+padding*) (P) | 24 48
24609 +---------------------------------------+
24610 | Alloca space (A) | 24+P etc.
24611 +---------------------------------------+
24612 | Local variable space (L) | 24+P+A
24613 +---------------------------------------+
24614 | Float/int conversion temporary (X) | 24+P+A+L
24615 +---------------------------------------+
24616 | Save area for AltiVec registers (W) | 24+P+A+L+X
24617 +---------------------------------------+
24618 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24619 +---------------------------------------+
24620 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24621 +---------------------------------------+
24622 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24623 +---------------------------------------+
24624 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24625 +---------------------------------------+
24626 old SP->| back chain to caller's caller |
24627 +---------------------------------------+
24629 * If the alloca area is present, the parameter save area is
24630 padded so that the former starts 16-byte aligned.
24632 The required alignment for AIX configurations is two words (i.e., 8
24633 or 16 bytes).
24635 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24637 SP----> +---------------------------------------+
24638 | Back chain to caller | 0
24639 +---------------------------------------+
24640 | Save area for CR | 8
24641 +---------------------------------------+
24642 | Saved LR | 16
24643 +---------------------------------------+
24644 | Saved TOC pointer | 24
24645 +---------------------------------------+
24646 | Parameter save area (+padding*) (P) | 32
24647 +---------------------------------------+
24648 | Alloca space (A) | 32+P
24649 +---------------------------------------+
24650 | Local variable space (L) | 32+P+A
24651 +---------------------------------------+
24652 | Save area for AltiVec registers (W) | 32+P+A+L
24653 +---------------------------------------+
24654 | AltiVec alignment padding (Y) | 32+P+A+L+W
24655 +---------------------------------------+
24656 | Save area for GP registers (G) | 32+P+A+L+W+Y
24657 +---------------------------------------+
24658 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24659 +---------------------------------------+
24660 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24661 +---------------------------------------+
24663 * If the alloca area is present, the parameter save area is
24664 padded so that the former starts 16-byte aligned.
24666 V.4 stack frames look like:
24668 SP----> +---------------------------------------+
24669 | back chain to caller | 0
24670 +---------------------------------------+
24671 | caller's saved LR | 4
24672 +---------------------------------------+
24673 | Parameter save area (+padding*) (P) | 8
24674 +---------------------------------------+
24675 | Alloca space (A) | 8+P
24676 +---------------------------------------+
24677 | Varargs save area (V) | 8+P+A
24678 +---------------------------------------+
24679 | Local variable space (L) | 8+P+A+V
24680 +---------------------------------------+
24681 | Float/int conversion temporary (X) | 8+P+A+V+L
24682 +---------------------------------------+
24683 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24684 +---------------------------------------+
24685 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24686 +---------------------------------------+
24687 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24688 +---------------------------------------+
24689 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24690 +---------------------------------------+
24691 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24692 +---------------------------------------+
24693 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24694 +---------------------------------------+
24695 old SP->| back chain to caller's caller |
24696 +---------------------------------------+
24698 * If the alloca area is present and the required alignment is
24699 16 bytes, the parameter save area is padded so that the
24700 alloca area starts 16-byte aligned.
24702 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24703 given. (But note below and in sysv4.h that we require only 8 and
24704 may round up the size of our stack frame anyway.  The historical
24705 reason is early versions of powerpc-linux which didn't properly
24706 align the stack at program startup. A happy side-effect is that
24707 -mno-eabi libraries can be used with -meabi programs.)
24709 The EABI configuration defaults to the V.4 layout. However,
24710 the stack alignment requirements may differ. If -mno-eabi is not
24711 given, the required stack alignment is 8 bytes; if -mno-eabi is
24712 given, the required alignment is 16 bytes. (But see V.4 comment
24713 above.) */
24715 #ifndef ABI_STACK_BOUNDARY
24716 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24717 #endif
24719 static rs6000_stack_t *
24720 rs6000_stack_info (void)
24722 /* We should never be called for thunks, we are not set up for that. */
24723 gcc_assert (!cfun->is_thunk);
24725 rs6000_stack_t *info = &stack_info;
24726 int reg_size = TARGET_32BIT ? 4 : 8;
24727 int ehrd_size;
24728 int ehcr_size;
24729 int save_align;
24730 int first_gp;
24731 HOST_WIDE_INT non_fixed_size;
24732 bool using_static_chain_p;
24734 if (reload_completed && info->reload_completed)
24735 return info;
24737 memset (info, 0, sizeof (*info));
24738 info->reload_completed = reload_completed;
24740 /* Select which calling sequence. */
24741 info->abi = DEFAULT_ABI;
24743 /* Calculate which registers need to be saved & save area size. */
24744 info->first_gp_reg_save = first_reg_to_save ();
24745 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24746 even if it currently looks like we won't. Reload may need it to
24747 get at a constant; if so, it will have already created a constant
24748 pool entry for it. */
24749 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24750 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24751 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24752 && crtl->uses_const_pool
24753 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24754 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24755 else
24756 first_gp = info->first_gp_reg_save;
24758 info->gp_size = reg_size * (32 - first_gp);
24760 info->first_fp_reg_save = first_fp_reg_to_save ();
24761 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24763 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24764 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24765 - info->first_altivec_reg_save);
24767 /* Does this function call anything? */
24768 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24770 /* Determine if we need to save the condition code registers. */
24771 if (save_reg_p (CR2_REGNO)
24772 || save_reg_p (CR3_REGNO)
24773 || save_reg_p (CR4_REGNO))
24775 info->cr_save_p = 1;
24776 if (DEFAULT_ABI == ABI_V4)
24777 info->cr_size = reg_size;
24780 /* If the current function calls __builtin_eh_return, then we need
24781 to allocate stack space for registers that will hold data for
24782 the exception handler. */
24783 if (crtl->calls_eh_return)
24785 unsigned int i;
24786 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24787 continue;
24789 ehrd_size = i * UNITS_PER_WORD;
24791 else
24792 ehrd_size = 0;
24794 /* In the ELFv2 ABI, we also need to allocate space for separate
24795 CR field save areas if the function calls __builtin_eh_return. */
24796 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24798 /* This hard-codes that we have three call-saved CR fields. */
24799 ehcr_size = 3 * reg_size;
24800 /* We do *not* use the regular CR save mechanism. */
24801 info->cr_save_p = 0;
24803 else
24804 ehcr_size = 0;
24806 /* Determine various sizes. */
24807 info->reg_size = reg_size;
24808 info->fixed_size = RS6000_SAVE_AREA;
24809 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24810 if (cfun->calls_alloca)
24811 info->parm_size =
24812 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24813 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24814 else
24815 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24816 TARGET_ALTIVEC ? 16 : 8);
24817 if (FRAME_GROWS_DOWNWARD)
24818 info->vars_size
24819 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24820 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24821 - (info->fixed_size + info->vars_size + info->parm_size);
24823 if (TARGET_ALTIVEC_ABI)
24824 info->vrsave_mask = compute_vrsave_mask ();
24826 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24827 info->vrsave_size = 4;
24829 compute_save_world_info (info);
24831 /* Calculate the offsets. */
24832 switch (DEFAULT_ABI)
24834 case ABI_NONE:
24835 default:
24836 gcc_unreachable ();
24838 case ABI_AIX:
24839 case ABI_ELFv2:
24840 case ABI_DARWIN:
24841 info->fp_save_offset = -info->fp_size;
24842 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24844 if (TARGET_ALTIVEC_ABI)
24846 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24848 /* Align stack so vector save area is on a quadword boundary.
24849 The padding goes above the vectors. */
24850 if (info->altivec_size != 0)
24851 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24853 info->altivec_save_offset = info->vrsave_save_offset
24854 - info->altivec_padding_size
24855 - info->altivec_size;
24856 gcc_assert (info->altivec_size == 0
24857 || info->altivec_save_offset % 16 == 0);
24859 /* Adjust for AltiVec case. */
24860 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24862 else
24863 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24865 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24866 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24867 info->lr_save_offset = 2*reg_size;
24868 break;
24870 case ABI_V4:
24871 info->fp_save_offset = -info->fp_size;
24872 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24873 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24875 if (TARGET_ALTIVEC_ABI)
24877 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24879 /* Align stack so vector save area is on a quadword boundary. */
24880 if (info->altivec_size != 0)
24881 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24883 info->altivec_save_offset = info->vrsave_save_offset
24884 - info->altivec_padding_size
24885 - info->altivec_size;
24887 /* Adjust for AltiVec case. */
24888 info->ehrd_offset = info->altivec_save_offset;
24890 else
24891 info->ehrd_offset = info->cr_save_offset;
24893 info->ehrd_offset -= ehrd_size;
24894 info->lr_save_offset = reg_size;
24897 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24898 info->save_size = RS6000_ALIGN (info->fp_size
24899 + info->gp_size
24900 + info->altivec_size
24901 + info->altivec_padding_size
24902 + ehrd_size
24903 + ehcr_size
24904 + info->cr_size
24905 + info->vrsave_size,
24906 save_align);
24908 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24910 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24911 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24913 /* Determine if we need to save the link register. */
24914 if (info->calls_p
24915 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24916 && crtl->profile
24917 && !TARGET_PROFILE_KERNEL)
24918 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24919 #ifdef TARGET_RELOCATABLE
24920 || (DEFAULT_ABI == ABI_V4
24921 && (TARGET_RELOCATABLE || flag_pic > 1)
24922 && !constant_pool_empty_p ())
24923 #endif
24924 || rs6000_ra_ever_killed ())
24925 info->lr_save_p = 1;
24927 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24928 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24929 && call_used_regs[STATIC_CHAIN_REGNUM]);
24930 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24932 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24933 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24934 || !(info->savres_strategy & SAVE_INLINE_VRS)
24935 || !(info->savres_strategy & REST_INLINE_GPRS)
24936 || !(info->savres_strategy & REST_INLINE_FPRS)
24937 || !(info->savres_strategy & REST_INLINE_VRS))
24938 info->lr_save_p = 1;
24940 if (info->lr_save_p)
24941 df_set_regs_ever_live (LR_REGNO, true);
24943 /* Determine if we need to allocate any stack frame:
24945 For AIX we need to push the stack if a frame pointer is needed
24946 (because the stack might be dynamically adjusted), if we are
24947 debugging, if we make calls, or if the sum of fp_save, gp_save,
24948 and local variables are more than the space needed to save all
24949 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24950 + 18*8 = 288 (GPR13 reserved).
24952 For V.4 we don't have the stack cushion that AIX uses, but assume
24953 that the debugger can handle stackless frames. */
24955 if (info->calls_p)
24956 info->push_p = 1;
24958 else if (DEFAULT_ABI == ABI_V4)
24959 info->push_p = non_fixed_size != 0;
24961 else if (frame_pointer_needed)
24962 info->push_p = 1;
24964 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24965 info->push_p = 1;
24967 else
24968 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24970 return info;
24973 static void
24974 debug_stack_info (rs6000_stack_t *info)
24976 const char *abi_string;
24978 if (! info)
24979 info = rs6000_stack_info ();
24981 fprintf (stderr, "\nStack information for function %s:\n",
24982 ((current_function_decl && DECL_NAME (current_function_decl))
24983 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24984 : "<unknown>"));
24986 switch (info->abi)
24988 default: abi_string = "Unknown"; break;
24989 case ABI_NONE: abi_string = "NONE"; break;
24990 case ABI_AIX: abi_string = "AIX"; break;
24991 case ABI_ELFv2: abi_string = "ELFv2"; break;
24992 case ABI_DARWIN: abi_string = "Darwin"; break;
24993 case ABI_V4: abi_string = "V.4"; break;
24996 fprintf (stderr, "\tABI = %5s\n", abi_string);
24998 if (TARGET_ALTIVEC_ABI)
24999 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
25001 if (info->first_gp_reg_save != 32)
25002 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
25004 if (info->first_fp_reg_save != 64)
25005 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
25007 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
25008 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
25009 info->first_altivec_reg_save);
25011 if (info->lr_save_p)
25012 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
25014 if (info->cr_save_p)
25015 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
25017 if (info->vrsave_mask)
25018 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
25020 if (info->push_p)
25021 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
25023 if (info->calls_p)
25024 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
25026 if (info->gp_size)
25027 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
25029 if (info->fp_size)
25030 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
25032 if (info->altivec_size)
25033 fprintf (stderr, "\taltivec_save_offset = %5d\n",
25034 info->altivec_save_offset);
25036 if (info->vrsave_size)
25037 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25038 info->vrsave_save_offset);
25040 if (info->lr_save_p)
25041 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25043 if (info->cr_save_p)
25044 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25046 if (info->varargs_save_offset)
25047 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25049 if (info->total_size)
25050 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25051 info->total_size);
25053 if (info->vars_size)
25054 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25055 info->vars_size);
25057 if (info->parm_size)
25058 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25060 if (info->fixed_size)
25061 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25063 if (info->gp_size)
25064 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25066 if (info->fp_size)
25067 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25069 if (info->altivec_size)
25070 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25072 if (info->vrsave_size)
25073 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25075 if (info->altivec_padding_size)
25076 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25077 info->altivec_padding_size);
25079 if (info->cr_size)
25080 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25082 if (info->save_size)
25083 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25085 if (info->reg_size != 4)
25086 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25088 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25090 fprintf (stderr, "\n");
25094 rs6000_return_addr (int count, rtx frame)
25096 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25097 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25098 if (count != 0
25099 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25101 cfun->machine->ra_needs_full_frame = 1;
25103 if (count == 0)
25104 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25105 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25106 frame = stack_pointer_rtx;
25107 rtx prev_frame_addr = memory_address (Pmode, frame);
25108 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25109 rtx lr_save_off = plus_constant (Pmode,
25110 prev_frame, RETURN_ADDRESS_OFFSET);
25111 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25112 return gen_rtx_MEM (Pmode, lr_save_addr);
25115 cfun->machine->ra_need_lr = 1;
25116 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25119 /* Say whether a function is a candidate for sibcall handling or not. */
25121 static bool
25122 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25124 tree fntype;
25126 if (decl)
25127 fntype = TREE_TYPE (decl);
25128 else
25129 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25131 /* We can't do it if the called function has more vector parameters
25132 than the current function; there's nowhere to put the VRsave code. */
25133 if (TARGET_ALTIVEC_ABI
25134 && TARGET_ALTIVEC_VRSAVE
25135 && !(decl && decl == current_function_decl))
25137 function_args_iterator args_iter;
25138 tree type;
25139 int nvreg = 0;
25141 /* Functions with vector parameters are required to have a
25142 prototype, so the argument type info must be available
25143 here. */
25144 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25145 if (TREE_CODE (type) == VECTOR_TYPE
25146 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25147 nvreg++;
25149 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
25150 if (TREE_CODE (type) == VECTOR_TYPE
25151 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25152 nvreg--;
25154 if (nvreg > 0)
25155 return false;
25158 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25159 functions, because the callee may have a different TOC pointer from
25160 the caller's, and there's no way to ensure we restore the TOC when
25161 we return. With the secure-plt SYSV ABI we can't make non-local
25162 calls when -fpic/PIC because the plt call stubs use r30. */
25163 if (DEFAULT_ABI == ABI_DARWIN
25164 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25165 && decl
25166 && !DECL_EXTERNAL (decl)
25167 && !DECL_WEAK (decl)
25168 && (*targetm.binds_local_p) (decl))
25169 || (DEFAULT_ABI == ABI_V4
25170 && (!TARGET_SECURE_PLT
25171 || !flag_pic
25172 || (decl
25173 && (*targetm.binds_local_p) (decl)))))
25175 tree attr_list = TYPE_ATTRIBUTES (fntype);
25177 if (!lookup_attribute ("longcall", attr_list)
25178 || lookup_attribute ("shortcall", attr_list))
25179 return true;
25182 return false;
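/* For illustration, a call that passes these checks (a sketch, assuming
   ELFv2 and a locally-bound callee so the TOC restriction above is
   satisfied):

     static int callee (int x) { return x + 1; }
     int caller (int x) { return callee (x); }

   CALLEE binds locally and carries no "longcall" attribute, so the
   tail call can be emitted as a sibcall with no TOC restore needed.  */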
25185 static int
25186 rs6000_ra_ever_killed (void)
25188 rtx_insn *top;
25189 rtx reg;
25190 rtx_insn *insn;
25192 if (cfun->is_thunk)
25193 return 0;
25195 if (cfun->machine->lr_save_state)
25196 return cfun->machine->lr_save_state - 1;
25198 /* regs_ever_live has LR marked as used if any sibcalls are present,
25199 but this should not force saving and restoring in the
25200 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25201 clobbers LR, so that is inappropriate. */
25203 /* Also, the prologue can generate a store into LR that
25204 doesn't really count, like this:
25206 move LR->R0
25207 bcl to set PIC register
25208 move LR->R31
25209 move R0->LR
25211 When we're called from the epilogue, we need to avoid counting
25212 this as a store. */
25214 push_topmost_sequence ();
25215 top = get_insns ();
25216 pop_topmost_sequence ();
25217 reg = gen_rtx_REG (Pmode, LR_REGNO);
25219 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25221 if (INSN_P (insn))
25223 if (CALL_P (insn))
25225 if (!SIBLING_CALL_P (insn))
25226 return 1;
25228 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25229 return 1;
25230 else if (set_of (reg, insn) != NULL_RTX
25231 && !prologue_epilogue_contains (insn))
25232 return 1;
25235 return 0;
25238 /* Emit instructions needed to load the TOC register.
25239 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
25240 there is a constant pool; or for SVR4 -fpic. */
25242 void
25243 rs6000_emit_load_toc_table (int fromprolog)
25245 rtx dest;
25246 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25248 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25250 char buf[30];
25251 rtx lab, tmp1, tmp2, got;
25253 lab = gen_label_rtx ();
25254 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25255 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25256 if (flag_pic == 2)
25258 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25259 need_toc_init = 1;
25261 else
25262 got = rs6000_got_sym ();
25263 tmp1 = tmp2 = dest;
25264 if (!fromprolog)
25266 tmp1 = gen_reg_rtx (Pmode);
25267 tmp2 = gen_reg_rtx (Pmode);
25269 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25270 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25271 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25272 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25274 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25276 emit_insn (gen_load_toc_v4_pic_si ());
25277 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25279 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25281 char buf[30];
25282 rtx temp0 = (fromprolog
25283 ? gen_rtx_REG (Pmode, 0)
25284 : gen_reg_rtx (Pmode));
25286 if (fromprolog)
25288 rtx symF, symL;
25290 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25291 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25293 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25294 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25296 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25297 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25298 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25300 else
25302 rtx tocsym, lab;
25304 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25305 need_toc_init = 1;
25306 lab = gen_label_rtx ();
25307 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25308 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25309 if (TARGET_LINK_STACK)
25310 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25311 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25313 emit_insn (gen_addsi3 (dest, temp0, dest));
25315 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25317 /* This is for AIX code running in non-PIC ELF32. */
25318 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25320 need_toc_init = 1;
25321 emit_insn (gen_elf_high (dest, realsym));
25322 emit_insn (gen_elf_low (dest, dest, realsym));
25324 else
25326 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25328 if (TARGET_32BIT)
25329 emit_insn (gen_load_toc_aix_si (dest));
25330 else
25331 emit_insn (gen_load_toc_aix_di (dest));
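/* For orientation, the secure-plt ABI_V4 -fPIC path above typically
   expands to a PC-relative computation of the GOT/TOC base into r30,
   roughly as follows (label names and relocation syntax are
   illustrative assumptions):

     bcl 20,31,.L1
   .L1: mflr 30
     addis 30,30,.LCTOC1-.L1@ha
     addi 30,30,.LCTOC1-.L1@l  */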
25335 /* Emit instructions to restore the link register after determining where
25336 its value has been stored. */
25338 void
25339 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25341 rs6000_stack_t *info = rs6000_stack_info ();
25342 rtx operands[2];
25344 operands[0] = source;
25345 operands[1] = scratch;
25347 if (info->lr_save_p)
25349 rtx frame_rtx = stack_pointer_rtx;
25350 HOST_WIDE_INT sp_offset = 0;
25351 rtx tmp;
25353 if (frame_pointer_needed
25354 || cfun->calls_alloca
25355 || info->total_size > 32767)
25357 tmp = gen_frame_mem (Pmode, frame_rtx);
25358 emit_move_insn (operands[1], tmp);
25359 frame_rtx = operands[1];
25361 else if (info->push_p)
25362 sp_offset = info->total_size;
25364 tmp = plus_constant (Pmode, frame_rtx,
25365 info->lr_save_offset + sp_offset);
25366 tmp = gen_frame_mem (Pmode, tmp);
25367 emit_move_insn (tmp, operands[0]);
25369 else
25370 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25372 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25373 state of lr_save_p so any change from here on would be a bug. In
25374 particular, stop rs6000_ra_ever_killed from considering the SET
25375 of lr we may have added just above. */
25376 cfun->machine->lr_save_state = info->lr_save_p + 1;
25379 static GTY(()) alias_set_type set = -1;
25381 alias_set_type
25382 get_TOC_alias_set (void)
25384 if (set == -1)
25385 set = new_alias_set ();
25386 return set;
25389 /* This returns nonzero if the current function uses the TOC. This is
25390 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25391 is generated by the ABI_V4 load_toc_* patterns.
25392 Return 2 instead of 1 if the load_toc_* pattern is in the function
25393 partition that doesn't start the function. */
25394 #if TARGET_ELF
25395 static int
25396 uses_TOC (void)
25398 rtx_insn *insn;
25399 int ret = 1;
25401 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25403 if (INSN_P (insn))
25405 rtx pat = PATTERN (insn);
25406 int i;
25408 if (GET_CODE (pat) == PARALLEL)
25409 for (i = 0; i < XVECLEN (pat, 0); i++)
25411 rtx sub = XVECEXP (pat, 0, i);
25412 if (GET_CODE (sub) == USE)
25414 sub = XEXP (sub, 0);
25415 if (GET_CODE (sub) == UNSPEC
25416 && XINT (sub, 1) == UNSPEC_TOC)
25417 return ret;
25421 else if (crtl->has_bb_partition
25422 && NOTE_P (insn)
25423 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25424 ret = 2;
25426 return 0;
25428 #endif
25431 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25433 rtx tocrel, tocreg, hi;
25435 if (TARGET_DEBUG_ADDR)
25437 if (GET_CODE (symbol) == SYMBOL_REF)
25438 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25439 XSTR (symbol, 0));
25440 else
25442 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25443 GET_RTX_NAME (GET_CODE (symbol)));
25444 debug_rtx (symbol);
25448 if (!can_create_pseudo_p ())
25449 df_set_regs_ever_live (TOC_REGISTER, true);
25451 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25452 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25453 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25454 return tocrel;
25456 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25457 if (largetoc_reg != NULL)
25459 emit_move_insn (largetoc_reg, hi);
25460 hi = largetoc_reg;
25462 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
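/* Sketch of the resulting addressing (the assembly shown is
   illustrative): with -mcmodel=small the bare UNSPEC_TOCREL allows a
   single

     ld 9,sym@toc(2)

   while the HIGH/LO_SUM pair built for the medium and large models
   becomes

     addis 9,2,sym@toc@ha
     ld 9,sym@toc@l(9)  */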
25465 /* Issue assembly directives that create a reference to the given DWARF
25466 FRAME_TABLE_LABEL from the current function section. */
25467 void
25468 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25470 fprintf (asm_out_file, "\t.ref %s\n",
25471 (* targetm.strip_name_encoding) (frame_table_label));
25474 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25475 and the change to the stack pointer. */
25477 static void
25478 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25480 rtvec p;
25481 int i;
25482 rtx regs[3];
25484 i = 0;
25485 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25486 if (hard_frame_needed)
25487 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25488 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25489 || (hard_frame_needed
25490 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25491 regs[i++] = fp;
25493 p = rtvec_alloc (i);
25494 while (--i >= 0)
25496 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25497 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25500 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25503 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25504 and set the appropriate attributes for the generated insn. Return the
25505 first insn which adjusts the stack pointer or the last insn before
25506 the stack adjustment loop.
25508 SIZE_INT is used to create the CFI note for the allocation.
25510 SIZE_RTX is an rtx containing the size of the adjustment. Note that,
25511 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25513 ORIG_SP contains the backchain value that must be stored at *sp. */
25515 static rtx_insn *
25516 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25518 rtx_insn *insn;
25520 rtx size_rtx = GEN_INT (-size_int);
25521 if (size_int > 32767)
25523 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25524 /* Need a note here so that try_split doesn't get confused. */
25525 if (get_last_insn () == NULL_RTX)
25526 emit_note (NOTE_INSN_DELETED);
25527 insn = emit_move_insn (tmp_reg, size_rtx);
25528 try_split (PATTERN (insn), insn, 0);
25529 size_rtx = tmp_reg;
25532 if (Pmode == SImode)
25533 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25534 stack_pointer_rtx,
25535 size_rtx,
25536 orig_sp));
25537 else
25538 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25539 stack_pointer_rtx,
25540 size_rtx,
25541 orig_sp));
25542 rtx par = PATTERN (insn);
25543 gcc_assert (GET_CODE (par) == PARALLEL);
25544 rtx set = XVECEXP (par, 0, 0);
25545 gcc_assert (GET_CODE (set) == SET);
25546 rtx mem = SET_DEST (set);
25547 gcc_assert (MEM_P (mem));
25548 MEM_NOTRAP_P (mem) = 1;
25549 set_mem_alias_set (mem, get_frame_alias_set ());
25551 RTX_FRAME_RELATED_P (insn) = 1;
25552 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25553 gen_rtx_SET (stack_pointer_rtx,
25554 gen_rtx_PLUS (Pmode,
25555 stack_pointer_rtx,
25556 GEN_INT (-size_int))));
25558 /* Emit a blockage to ensure the allocation/probing insns are
25559 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25560 note for similar reasons. */
25561 if (flag_stack_clash_protection)
25563 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25564 emit_insn (gen_blockage ());
25567 return insn;
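/* The store-with-update emitted above is the usual PowerPC frame
   allocation idiom; for a 64-bit 4 KiB frame it is roughly

     stdu 1,-4096(1)    # *new_sp = backchain, sp -= 4096

   (a sketch only; the exact insns come from the *_update_stack
   patterns).  When the size does not fit in a 16-bit displacement,
   the code above first loads -SIZE_INT into r0 so an indexed update
   form can be used.  */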
25570 static HOST_WIDE_INT
25571 get_stack_clash_protection_probe_interval (void)
25573 return (HOST_WIDE_INT_1U
25574 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25577 static HOST_WIDE_INT
25578 get_stack_clash_protection_guard_size (void)
25580 return (HOST_WIDE_INT_1U
25581 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25584 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25585 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25587 COPY_REG, if non-null, will hold a copy of the original
25588 stack pointer on exit from this function.
25590 This is subtly different than the Ada probing in that it tries hard to
25591 prevent attacks that jump the stack guard. Thus it is never allowed to
25592 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25593 space without a suitable probe. */
25594 static rtx_insn *
25595 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25596 rtx copy_reg)
25598 rtx orig_sp = copy_reg;
25600 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25602 /* Round the size down to a multiple of PROBE_INTERVAL. */
25603 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25605 /* If explicitly requested,
25606 or the rounded size is not the same as the original size,
25607 or the rounded size is greater than one probe interval,
25608 then we will need a copy of the original stack pointer. */
25609 if (rounded_size != orig_size
25610 || rounded_size > probe_interval
25611 || copy_reg)
25613 /* If the caller did not request a copy of the incoming stack
25614 pointer, then we use r0 to hold the copy. */
25615 if (!copy_reg)
25616 orig_sp = gen_rtx_REG (Pmode, 0);
25617 emit_move_insn (orig_sp, stack_pointer_rtx);
25620 /* There are three cases here.
25622 One is a single probe, which is the most common and most efficiently
25623 implemented, as it does not need a copy of the original
25624 stack pointer if there are no residuals.
25626 Second is unrolled allocation/probes, which we use if there are just
25627 a few of them. It needs to save the original stack pointer into a
25628 temporary for use as a source register in the allocation/probe.
25630 Last is a loop. This is the most uncommon case and least efficient. */
25631 rtx_insn *retval = NULL;
25632 if (rounded_size == probe_interval)
25634 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25636 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25638 else if (rounded_size <= 8 * probe_interval)
25640 /* The ABI requires using the store-with-update insns to allocate
25641 space and store the backchain into the stack.
25643 So we save the current stack pointer into a temporary, then
25644 emit the store-with-update insns to store the saved stack pointer
25645 into the right location in each new page. */
25646 for (int i = 0; i < rounded_size; i += probe_interval)
25648 rtx_insn *insn
25649 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25651 /* Save the first stack adjustment in RETVAL. */
25652 if (i == 0)
25653 retval = insn;
25656 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25658 else
25660 /* Compute the ending address. */
25661 rtx end_addr
25662 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25663 rtx rs = GEN_INT (-rounded_size);
25664 rtx_insn *insn;
25665 if (add_operand (rs, Pmode))
25666 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25667 else
25669 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25670 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25671 stack_pointer_rtx));
25672 /* Describe the effect of INSN to the CFI engine. */
25673 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25674 gen_rtx_SET (end_addr,
25675 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25676 rs)));
25678 RTX_FRAME_RELATED_P (insn) = 1;
25680 /* Emit the loop. */
25681 if (TARGET_64BIT)
25682 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25683 stack_pointer_rtx, orig_sp,
25684 end_addr));
25685 else
25686 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25687 stack_pointer_rtx, orig_sp,
25688 end_addr));
25689 RTX_FRAME_RELATED_P (retval) = 1;
25690 /* Describe the effect of INSN to the CFI engine. */
25691 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25692 gen_rtx_SET (stack_pointer_rtx, end_addr));
25694 /* Emit a blockage to ensure the allocation/probing insns are
25695 not optimized, combined, removed, etc. Other cases handle this
25696 within their call to rs6000_emit_allocate_stack_1. */
25697 emit_insn (gen_blockage ());
25699 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25702 if (orig_size != rounded_size)
25704 /* Allocate (and implicitly probe) any residual space. */
25705 HOST_WIDE_INT residual = orig_size - rounded_size;
25707 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25709 /* If the residual was the only allocation, then we can return the
25710 allocating insn. */
25711 if (!retval)
25712 retval = insn;
25715 return retval;
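/* Worked example, assuming the default 4 KiB probe interval: for a
   10 KiB frame, ROUNDED_SIZE is 8 KiB, which is within eight probe
   intervals, so the unrolled case above emits two 4 KiB
   allocate-and-probe stores, and the residual path then allocates
   the remaining 2 KiB with one more probing store.  */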
25718 /* Emit the correct code for allocating stack space, as insns.
25719 If COPY_REG, make sure a copy of the old frame is left there.
25720 The generated code may use hard register 0 as a temporary. */
25722 static rtx_insn *
25723 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25725 rtx_insn *insn;
25726 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25727 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25728 rtx todec = gen_int_mode (-size, Pmode);
25730 if (INTVAL (todec) != -size)
25732 warning (0, "stack frame too large");
25733 emit_insn (gen_trap ());
25734 return 0;
25737 if (crtl->limit_stack)
25739 if (REG_P (stack_limit_rtx)
25740 && REGNO (stack_limit_rtx) > 1
25741 && REGNO (stack_limit_rtx) <= 31)
25743 rtx_insn *insn
25744 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25745 gcc_assert (insn);
25746 emit_insn (insn);
25747 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25749 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25750 && TARGET_32BIT
25751 && DEFAULT_ABI == ABI_V4
25752 && !flag_pic)
25754 rtx toload = gen_rtx_CONST (VOIDmode,
25755 gen_rtx_PLUS (Pmode,
25756 stack_limit_rtx,
25757 GEN_INT (size)));
25759 emit_insn (gen_elf_high (tmp_reg, toload));
25760 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25761 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25762 const0_rtx));
25764 else
25765 warning (0, "stack limit expression is not supported");
25768 if (flag_stack_clash_protection)
25770 if (size < get_stack_clash_protection_guard_size ())
25771 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25772 else
25774 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25775 copy_reg);
25777 /* If we asked for a copy with an offset, then we still need to add
25778 in the offset. */
25779 if (copy_reg && copy_off)
25780 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25781 return insn;
25785 if (copy_reg)
25787 if (copy_off != 0)
25788 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25789 else
25790 emit_move_insn (copy_reg, stack_reg);
25793 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25794 it now and set the alias set/attributes. The above gen_*_update
25795 calls will generate a PARALLEL with the MEM set being the first
25796 operation. */
25797 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25798 return insn;
25801 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25803 #if PROBE_INTERVAL > 32768
25804 #error Cannot use indexed addressing mode for stack probing
25805 #endif
25807 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25808 inclusive. These are offsets from the current stack pointer. */
25810 static void
25811 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25813 /* See if we have a constant small number of probes to generate. If so,
25814 that's the easy case. */
25815 if (first + size <= 32768)
25817 HOST_WIDE_INT i;
25819 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25820 it exceeds SIZE. If only one probe is needed, this will not
25821 generate any code. Then probe at FIRST + SIZE. */
25822 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25823 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25824 -(first + i)));
25826 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25827 -(first + size)));
25830 /* Otherwise, do the same as above, but in a loop. Note that we must be
25831 extra careful with variables wrapping around because we might be at
25832 the very top (or the very bottom) of the address space and we have
25833 to be able to handle this case properly; in particular, we use an
25834 equality test for the loop condition. */
25835 else
25837 HOST_WIDE_INT rounded_size;
25838 rtx r12 = gen_rtx_REG (Pmode, 12);
25839 rtx r0 = gen_rtx_REG (Pmode, 0);
25841 /* Sanity check for the addressing mode we're going to use. */
25842 gcc_assert (first <= 32768);
25844 /* Step 1: round SIZE to the previous multiple of the interval. */
25846 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25849 /* Step 2: compute initial and final value of the loop counter. */
25851 /* TEST_ADDR = SP + FIRST. */
25852 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25853 -first)));
25855 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25856 if (rounded_size > 32768)
25858 emit_move_insn (r0, GEN_INT (-rounded_size));
25859 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25861 else
25862 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25863 -rounded_size)));
25866 /* Step 3: the loop
25870 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25871 probe at TEST_ADDR
25873 while (TEST_ADDR != LAST_ADDR)
25875 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25876 until it is equal to ROUNDED_SIZE. */
25878 if (TARGET_64BIT)
25879 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25880 else
25881 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25884 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25885 that SIZE is equal to ROUNDED_SIZE. */
25887 if (size != rounded_size)
25888 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
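/* Worked example, assuming PROBE_INTERVAL is 4 KiB: FIRST = 16384 and
   SIZE = 10000 give FIRST + SIZE <= 32768, so the simple case above
   emits probes at sp-20480 and sp-24576, then the final probe at
   sp-26384.  */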
25892 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25893 addresses, not offsets. */
25895 static const char *
25896 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25898 static int labelno = 0;
25899 char loop_lab[32];
25900 rtx xops[2];
25902 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25904 /* Loop. */
25905 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25907 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25908 xops[0] = reg1;
25909 xops[1] = GEN_INT (-PROBE_INTERVAL);
25910 output_asm_insn ("addi %0,%0,%1", xops);
25912 /* Probe at TEST_ADDR. */
25913 xops[1] = gen_rtx_REG (Pmode, 0);
25914 output_asm_insn ("stw %1,0(%0)", xops);
25916 /* Test if TEST_ADDR == LAST_ADDR. */
25917 xops[1] = reg2;
25918 if (TARGET_64BIT)
25919 output_asm_insn ("cmpd 0,%0,%1", xops);
25920 else
25921 output_asm_insn ("cmpw 0,%0,%1", xops);
25923 /* Branch. */
25924 fputs ("\tbne 0,", asm_out_file);
25925 assemble_name_raw (asm_out_file, loop_lab);
25926 fputc ('\n', asm_out_file);
25928 return "";
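/* For illustration, on 32-bit the loop printed above comes out as
   (assuming r12/r0 as set up by rs6000_emit_probe_stack_range and the
   default 4 KiB probe interval):

   .LPSRL0:
     addi 12,12,-4096
     stw 0,0(12)
     cmpw 0,12,0
     bne 0,.LPSRL0  */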
25931 /* This function is called when rs6000_frame_related is processing
25932 SETs within a PARALLEL, and returns whether the REGNO save ought to
25933 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25934 for out-of-line register save functions, store multiple, and the
25935 Darwin world_save. They may contain registers that don't really
25936 need saving. */
25938 static bool
25939 interesting_frame_related_regno (unsigned int regno)
25941 /* Apparent saves of r0 are actually saving LR. It doesn't make
25942 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25943 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25944 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25945 as frame related. */
25946 if (regno == 0)
25947 return true;
25948 /* If we see CR2 then we are here on a Darwin world save. Saves of
25949 CR2 signify the whole CR is being saved. This is a long-standing
25950 ABI wart fixed by ELFv2. As with r0/lr, there is no need to check
25951 that CR needs to be saved. */
25952 if (regno == CR2_REGNO)
25953 return true;
25954 /* Omit frame info for any user-defined global regs. If frame info
25955 is supplied for them, frame unwinding will restore a user reg.
25956 Also omit frame info for any reg we don't need to save, as that
25957 bloats frame info and can cause problems with shrink wrapping.
25958 Since global regs won't be seen as needing to be saved, both of
25959 these conditions are covered by save_reg_p. */
25960 return save_reg_p (regno);
25963 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25964 addresses, not offsets.
25966 REG2 contains the backchain that must be stored into *sp at each allocation.
25968 This is subtly different than the Ada probing above in that it tries hard
25969 to prevent attacks that jump the stack guard. Thus, it is never allowed
25970 to allocate more than PROBE_INTERVAL bytes of stack space without a
25971 suitable probe. */
25973 static const char *
25974 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25976 static int labelno = 0;
25977 char loop_lab[32];
25978 rtx xops[3];
25980 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25982 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25984 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25986 /* This allocates and probes. */
25987 xops[0] = reg1;
25988 xops[1] = reg2;
25989 xops[2] = GEN_INT (-probe_interval);
25990 if (TARGET_64BIT)
25991 output_asm_insn ("stdu %1,%2(%0)", xops);
25992 else
25993 output_asm_insn ("stwu %1,%2(%0)", xops);
25995 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25996 xops[0] = reg1;
25997 xops[1] = reg3;
25998 if (TARGET_64BIT)
25999 output_asm_insn ("cmpd 0,%0,%1", xops);
26000 else
26001 output_asm_insn ("cmpw 0,%0,%1", xops);
26003 fputs ("\tbne 0,", asm_out_file);
26004 assemble_name_raw (asm_out_file, loop_lab);
26005 fputc ('\n', asm_out_file);
26007 return "";
26010 /* Wrapper around the output_probe_stack_range routines. */
26011 const char *
26012 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
26014 if (flag_stack_clash_protection)
26015 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
26016 else
26017 return output_probe_stack_range_1 (reg1, reg3);
26020 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
26021 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
26022 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
26023 deduce these equivalences by itself so it wasn't necessary to hold
26024 its hand so much. Don't be tempted to always supply d2_f_d_e with
26025 the actual cfa register, i.e. r31 when we are using a hard frame
26026 pointer. That fails when saving regs off r1, and sched moves the
26027 r31 setup past the reg saves. */
26029 static rtx_insn *
26030 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
26031 rtx reg2, rtx repl2)
26033 rtx repl;
26035 if (REGNO (reg) == STACK_POINTER_REGNUM)
26037 gcc_checking_assert (val == 0);
26038 repl = NULL_RTX;
26040 else
26041 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26042 GEN_INT (val));
26044 rtx pat = PATTERN (insn);
26045 if (!repl && !reg2)
26047 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26048 if (GET_CODE (pat) == PARALLEL)
26049 for (int i = 0; i < XVECLEN (pat, 0); i++)
26050 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26052 rtx set = XVECEXP (pat, 0, i);
26054 if (!REG_P (SET_SRC (set))
26055 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26056 RTX_FRAME_RELATED_P (set) = 1;
26058 RTX_FRAME_RELATED_P (insn) = 1;
26059 return insn;
26062 /* We expect that 'pat' is either a SET or a PARALLEL containing
26063 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26064 are important so they all have to be marked RTX_FRAME_RELATED_P.
26065 Call simplify_replace_rtx on the SETs rather than the whole insn
26066 so as to leave the other stuff alone (for example USE of r12). */
26068 set_used_flags (pat);
26069 if (GET_CODE (pat) == SET)
26071 if (repl)
26072 pat = simplify_replace_rtx (pat, reg, repl);
26073 if (reg2)
26074 pat = simplify_replace_rtx (pat, reg2, repl2);
26076 else if (GET_CODE (pat) == PARALLEL)
26078 pat = shallow_copy_rtx (pat);
26079 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26081 for (int i = 0; i < XVECLEN (pat, 0); i++)
26082 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26084 rtx set = XVECEXP (pat, 0, i);
26086 if (repl)
26087 set = simplify_replace_rtx (set, reg, repl);
26088 if (reg2)
26089 set = simplify_replace_rtx (set, reg2, repl2);
26090 XVECEXP (pat, 0, i) = set;
26092 if (!REG_P (SET_SRC (set))
26093 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26094 RTX_FRAME_RELATED_P (set) = 1;
26097 else
26098 gcc_unreachable ();
26100 RTX_FRAME_RELATED_P (insn) = 1;
26101 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26103 return insn;
26106 /* Returns an insn that has a vrsave set operation with the
26107 appropriate CLOBBERs. */
26109 static rtx
26110 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26112 int nclobs, i;
26113 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26114 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26116 clobs[0]
26117 = gen_rtx_SET (vrsave,
26118 gen_rtx_UNSPEC_VOLATILE (SImode,
26119 gen_rtvec (2, reg, vrsave),
26120 UNSPECV_SET_VRSAVE));
26122 nclobs = 1;
26124 /* We need to clobber the registers in the mask so the scheduler
26125 does not move sets to VRSAVE before sets of AltiVec registers.
26127 However, if the function receives nonlocal gotos, reload will set
26128 all call saved registers live. We will end up with:
26130 (set (reg 999) (mem))
26131 (parallel [ (set (reg vrsave) (unspec blah))
26132 (clobber (reg 999))])
26134 The clobber will cause the store into reg 999 to be dead, and
26135 flow will attempt to delete an epilogue insn. In this case, we
26136 need an unspec use/set of the register. */
26138 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26139 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26141 if (!epiloguep || call_used_regs [i])
26142 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
26143 gen_rtx_REG (V4SImode, i));
26144 else
26146 rtx reg = gen_rtx_REG (V4SImode, i);
26148 clobs[nclobs++]
26149 = gen_rtx_SET (reg,
26150 gen_rtx_UNSPEC (V4SImode,
26151 gen_rtvec (1, reg), 27));
26155 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26157 for (i = 0; i < nclobs; ++i)
26158 XVECEXP (insn, 0, i) = clobs[i];
26160 return insn;
26163 static rtx
26164 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26166 rtx addr, mem;
26168 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26169 mem = gen_frame_mem (GET_MODE (reg), addr);
26170 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26173 static rtx
26174 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26176 return gen_frame_set (reg, frame_reg, offset, false);
26179 static rtx
26180 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26182 return gen_frame_set (reg, frame_reg, offset, true);
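/* E.g. gen_frame_store (gen_rtx_REG (DImode, 31), sp, 8), with SP the
   stack pointer rtx, builds (schematically)

     (set (mem:DI (plus:DI (reg 1) (const_int 8))) (reg:DI 31))

   with the frame alias set attached by gen_frame_mem.  */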
26185 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26186 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26188 static rtx_insn *
26189 emit_frame_save (rtx frame_reg, machine_mode mode,
26190 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26192 rtx reg;
26194 /* Some cases that need register indexed addressing. */
26195 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26196 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26198 reg = gen_rtx_REG (mode, regno);
26199 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26200 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26201 NULL_RTX, NULL_RTX);
26204 /* Emit an offset memory reference suitable for a frame store, while
26205 converting to a valid addressing mode. */
26207 static rtx
26208 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26210 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26213 #ifndef TARGET_FIX_AND_CONTINUE
26214 #define TARGET_FIX_AND_CONTINUE 0
26215 #endif
26217 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26218 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26219 #define LAST_SAVRES_REGISTER 31
26220 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26222 enum {
26223 SAVRES_LR = 0x1,
26224 SAVRES_SAVE = 0x2,
26225 SAVRES_REG = 0x0c,
26226 SAVRES_GPR = 0,
26227 SAVRES_FPR = 4,
26228 SAVRES_VR = 8
26231 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26233 /* Temporary holding space for an out-of-line register save/restore
26234 routine name. */
26235 static char savres_routine_name[30];
26237 /* Return the name for an out-of-line register save/restore routine.
26238 The register class and LR handling are encoded in SEL. */
26240 static char *
26241 rs6000_savres_routine_name (int regno, int sel)
26243 const char *prefix = "";
26244 const char *suffix = "";
26246 /* Different targets are supposed to define
26247 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26248 routine name could be defined with:
26250 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26252 This is a nice idea in theory, but in reality things are
26253 complicated in several ways:
26255 - ELF targets have save/restore routines for GPRs.
26257 - PPC64 ELF targets have routines for save/restore of GPRs that
26258 differ in what they do with the link register, so having a set
26259 prefix doesn't work. (We only use one of the save routines at
26260 the moment, though.)
26262 - PPC32 elf targets have "exit" versions of the restore routines
26263 that restore the link register and can save some extra space.
26264 These require an extra suffix. (There are also "tail" versions
26265 of the restore routines and "GOT" versions of the save routines,
26266 but we don't generate those at present. Same problems apply,
26267 though.)
26269 We deal with all this by synthesizing our own prefix/suffix and
26270 using that for the simple sprintf call shown above. */
26271 if (DEFAULT_ABI == ABI_V4)
26273 if (TARGET_64BIT)
26274 goto aix_names;
26276 if ((sel & SAVRES_REG) == SAVRES_GPR)
26277 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26278 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26279 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26280 else if ((sel & SAVRES_REG) == SAVRES_VR)
26281 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26282 else
26283 abort ();
26285 if ((sel & SAVRES_LR))
26286 suffix = "_x";
26288 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26290 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26291 /* No out-of-line save/restore routines for GPRs on AIX. */
26292 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26293 #endif
26295 aix_names:
26296 if ((sel & SAVRES_REG) == SAVRES_GPR)
26297 prefix = ((sel & SAVRES_SAVE)
26298 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26299 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26300 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26302 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26303 if ((sel & SAVRES_LR))
26304 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26305 else
26306 #endif
26308 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26309 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26312 else if ((sel & SAVRES_REG) == SAVRES_VR)
26313 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26314 else
26315 abort ();
26318 if (DEFAULT_ABI == ABI_DARWIN)
26320 /* The Darwin approach is (slightly) different, in order to be
26321 compatible with code generated by the system toolchain. There is a
26322 single symbol for the start of save sequence, and the code here
26323 embeds an offset into that code on the basis of the first register
26324 to be saved. */
26325 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26326 if ((sel & SAVRES_REG) == SAVRES_GPR)
26327 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26328 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26329 (regno - 13) * 4, prefix, regno);
26330 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26331 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26332 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26333 else if ((sel & SAVRES_REG) == SAVRES_VR)
26334 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26335 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26336 else
26337 abort ();
26339 else
26340 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26342 return savres_routine_name;
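/* Examples of the synthesized names: 32-bit SVR4 saving GPRs from r29
   with the LR variant yields "_savegpr_29_x", while the AIX/ELFv2
   path yields "_savegpr0_29" (LR saved) or "_savegpr1_29" (LR
   untouched).  */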
26345 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26346 The register class and LR handling are encoded in SEL. */
26348 static rtx
26349 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26351 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26352 ? info->first_gp_reg_save
26353 : (sel & SAVRES_REG) == SAVRES_FPR
26354 ? info->first_fp_reg_save - 32
26355 : (sel & SAVRES_REG) == SAVRES_VR
26356 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26357 : -1);
26358 rtx sym;
26359 int select = sel;
26361 /* Don't generate bogus routine names. */
26362 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26363 && regno <= LAST_SAVRES_REGISTER
26364 && select >= 0 && select <= 12);
26366 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26368 if (sym == NULL)
26370 char *name;
26372 name = rs6000_savres_routine_name (regno, sel);
26374 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26375 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26376 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26379 return sym;
26382 /* Emit a sequence of insns, including a stack tie if needed, for
26383 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26384 reset the stack pointer, but move the base of the frame into
26385 reg UPDT_REGNO for use by out-of-line register restore routines. */
26387 static rtx
26388 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26389 unsigned updt_regno)
26391 /* If there is nothing to do, don't do anything. */
26392 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26393 return NULL_RTX;
26395 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26397 /* This blockage is needed so that sched doesn't decide to move
26398 the sp change before the register restores. */
26399 if (DEFAULT_ABI == ABI_V4)
26400 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26401 GEN_INT (frame_off)));
26403 /* If we are restoring registers out-of-line, we will be using the
26404 "exit" variants of the restore routines, which will reset the
26405 stack for us. But we do need to point updt_reg into the
26406 right place for those routines. */
26407 if (frame_off != 0)
26408 return emit_insn (gen_add3_insn (updt_reg_rtx,
26409 frame_reg_rtx, GEN_INT (frame_off)));
26410 else
26411 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26413 return NULL_RTX;
26416 /* Return the register number used as a pointer by out-of-line
26417 save/restore functions. */
26419 static inline unsigned
26420 ptr_regno_for_savres (int sel)
26422 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26423 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26424 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26427 /* Construct a parallel rtx describing the effect of a call to an
26428 out-of-line register save/restore routine, and emit the insn
26429 or jump_insn as appropriate. */
26431 static rtx_insn *
26432 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26433 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26434 machine_mode reg_mode, int sel)
26436 int i;
26437 int offset, start_reg, end_reg, n_regs, use_reg;
26438 int reg_size = GET_MODE_SIZE (reg_mode);
26439 rtx sym;
26440 rtvec p;
26441 rtx par;
26442 rtx_insn *insn;
26444 offset = 0;
26445 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26446 ? info->first_gp_reg_save
26447 : (sel & SAVRES_REG) == SAVRES_FPR
26448 ? info->first_fp_reg_save
26449 : (sel & SAVRES_REG) == SAVRES_VR
26450 ? info->first_altivec_reg_save
26451 : -1);
26452 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26453 ? 32
26454 : (sel & SAVRES_REG) == SAVRES_FPR
26455 ? 64
26456 : (sel & SAVRES_REG) == SAVRES_VR
26457 ? LAST_ALTIVEC_REGNO + 1
26458 : -1);
26459 n_regs = end_reg - start_reg;
26460 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26461 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26462 + n_regs);
26464 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26465 RTVEC_ELT (p, offset++) = ret_rtx;
26467 RTVEC_ELT (p, offset++)
26468 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26470 sym = rs6000_savres_routine_sym (info, sel);
26471 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26473 use_reg = ptr_regno_for_savres (sel);
26474 if ((sel & SAVRES_REG) == SAVRES_VR)
26476 /* Vector regs are saved/restored using [reg+reg] addressing. */
26477 RTVEC_ELT (p, offset++)
26478 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26479 RTVEC_ELT (p, offset++)
26480 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26482 else
26483 RTVEC_ELT (p, offset++)
26484 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26486 for (i = 0; i < end_reg - start_reg; i++)
26487 RTVEC_ELT (p, i + offset)
26488 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26489 frame_reg_rtx, save_area_offset + reg_size * i,
26490 (sel & SAVRES_SAVE) != 0);
26492 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26493 RTVEC_ELT (p, i + offset)
26494 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26496 par = gen_rtx_PARALLEL (VOIDmode, p);
26498 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26500 insn = emit_jump_insn (par);
26501 JUMP_LABEL (insn) = ret_rtx;
26503 else
26504 insn = emit_insn (par);
26505 return insn;
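/* Rough shape of the PARALLEL built above for an "exit" GPR restore
   (element order as constructed; register choices are illustrative):

     (parallel [(return)
                (clobber (reg LR))
                (use (symbol_ref "_restgpr0_29"))
                (use (reg 1))
                (set (reg 29) (mem ...)) ...])  */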
26508 /* Emit prologue code to store CR fields that need to be saved into REG. This
26509 function should only be called when moving the non-volatile CRs to REG, it
26510 is not a general purpose routine to move the entire set of CRs to REG.
26511 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26512 volatile CRs. */
26514 static void
26515 rs6000_emit_prologue_move_from_cr (rtx reg)
26517 /* Only the ELFv2 ABI allows storing only selected fields. */
26518 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26520 int i, cr_reg[8], count = 0;
26522 /* Collect CR fields that must be saved. */
26523 for (i = 0; i < 8; i++)
26524 if (save_reg_p (CR0_REGNO + i))
26525 cr_reg[count++] = i;
26527 /* If it's just a single one, use mfcrf. */
26528 if (count == 1)
26530 rtvec p = rtvec_alloc (1);
26531 rtvec r = rtvec_alloc (2);
26532 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26533 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26534 RTVEC_ELT (p, 0)
26535 = gen_rtx_SET (reg,
26536 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26538 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26539 return;
26542 /* ??? It might be better to handle count == 2 / 3 cases here
26543 as well, using logical operations to combine the values. */
26546 emit_insn (gen_prologue_movesi_from_cr (reg));
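/* E.g. when only CR2 must be saved, the single-field case above emits
   (mnemonics illustrative)

     mfocrf rN,0x20

   since 1 << (7 - 2) selects just field 2, instead of a full mfcr.  */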
26549 /* Return whether the split-stack arg pointer (r12) is used. */
26551 static bool
26552 split_stack_arg_pointer_used_p (void)
26554 /* If the pseudo holding the arg pointer is no longer a pseudo,
26555 then the arg pointer is used. */
26556 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26557 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26558 || (REGNO (cfun->machine->split_stack_arg_pointer)
26559 < FIRST_PSEUDO_REGISTER)))
26560 return true;
26562 /* Unfortunately we also need to do some code scanning, since
26563 r12 may have been substituted for the pseudo. */
26564 rtx_insn *insn;
26565 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26566 FOR_BB_INSNS (bb, insn)
26567 if (NONDEBUG_INSN_P (insn))
26569 /* A call destroys r12. */
26570 if (CALL_P (insn))
26571 return false;
26573 df_ref use;
26574 FOR_EACH_INSN_USE (use, insn)
26576 rtx x = DF_REF_REG (use);
26577 if (REG_P (x) && REGNO (x) == 12)
26578 return true;
26580 df_ref def;
26581 FOR_EACH_INSN_DEF (def, insn)
26583 rtx x = DF_REF_REG (def);
26584 if (REG_P (x) && REGNO (x) == 12)
26585 return false;
26588 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26591 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26593 static bool
26594 rs6000_global_entry_point_needed_p (void)
26596 /* Only needed for the ELFv2 ABI. */
26597 if (DEFAULT_ABI != ABI_ELFv2)
26598 return false;
26600 /* With -msingle-pic-base, we assume the whole program shares the same
26601 TOC, so no global entry point prologues are needed anywhere. */
26602 if (TARGET_SINGLE_PIC_BASE)
26603 return false;
26605 /* Ensure we have a global entry point for thunks. ??? We could
26606 avoid that if the target routine doesn't need a global entry point,
26607 but we do not know whether this is the case at this point. */
26608 if (cfun->is_thunk)
26609 return true;
26611 /* For regular functions, rs6000_emit_prologue sets this flag if the
26612 routine ever uses the TOC pointer. */
26613 return cfun->machine->r2_setup_needed;
26616 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26617 static sbitmap
26618 rs6000_get_separate_components (void)
26620 rs6000_stack_t *info = rs6000_stack_info ();
26622 if (WORLD_SAVE_P (info))
26623 return NULL;
26625 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26626 && !(info->savres_strategy & REST_MULTIPLE));
26628 /* Component 0 is the save/restore of LR (done via GPR0).
26629 Component 2 is the save of the TOC (GPR2).
26630 Components 13..31 are the save/restore of GPR13..GPR31.
26631 Components 46..63 are the save/restore of FPR14..FPR31. */
26633 cfun->machine->n_components = 64;
26635 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26636 bitmap_clear (components);
26638 int reg_size = TARGET_32BIT ? 4 : 8;
26639 int fp_reg_size = 8;
26641 /* The GPRs we need saved to the frame. */
26642 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26643 && (info->savres_strategy & REST_INLINE_GPRS))
26645 int offset = info->gp_save_offset;
26646 if (info->push_p)
26647 offset += info->total_size;
26649 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26651 if (IN_RANGE (offset, -0x8000, 0x7fff)
26652 && save_reg_p (regno))
26653 bitmap_set_bit (components, regno);
26655 offset += reg_size;
26659 /* Don't mess with the hard frame pointer. */
26660 if (frame_pointer_needed)
26661 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26663 /* Don't mess with the fixed TOC register. */
26664 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26665 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26666 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26667 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26669 /* The FPRs we need saved to the frame. */
26670 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26671 && (info->savres_strategy & REST_INLINE_FPRS))
26673 int offset = info->fp_save_offset;
26674 if (info->push_p)
26675 offset += info->total_size;
26677 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26679 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26680 bitmap_set_bit (components, regno);
26682 offset += fp_reg_size;
26686 /* Optimize LR save and restore if we can. This is component 0. Any
26687 out-of-line register save/restore routines need LR. */
26688 if (info->lr_save_p
26689 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26690 && (info->savres_strategy & SAVE_INLINE_GPRS)
26691 && (info->savres_strategy & REST_INLINE_GPRS)
26692 && (info->savres_strategy & SAVE_INLINE_FPRS)
26693 && (info->savres_strategy & REST_INLINE_FPRS)
26694 && (info->savres_strategy & SAVE_INLINE_VRS)
26695 && (info->savres_strategy & REST_INLINE_VRS))
26697 int offset = info->lr_save_offset;
26698 if (info->push_p)
26699 offset += info->total_size;
26700 if (IN_RANGE (offset, -0x8000, 0x7fff))
26701 bitmap_set_bit (components, 0);
26704 /* Optimize saving the TOC. This is component 2. */
26705 if (cfun->machine->save_toc_in_prologue)
26706 bitmap_set_bit (components, 2);
26708 return components;
26711 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26712 static sbitmap
26713 rs6000_components_for_bb (basic_block bb)
26715 rs6000_stack_t *info = rs6000_stack_info ();
26717 bitmap in = DF_LIVE_IN (bb);
26718 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26719 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26721 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26722 bitmap_clear (components);
26724 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26726 /* GPRs. */
26727 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26728 if (bitmap_bit_p (in, regno)
26729 || bitmap_bit_p (gen, regno)
26730 || bitmap_bit_p (kill, regno))
26731 bitmap_set_bit (components, regno);
26733 /* FPRs. */
26734 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26735 if (bitmap_bit_p (in, regno)
26736 || bitmap_bit_p (gen, regno)
26737 || bitmap_bit_p (kill, regno))
26738 bitmap_set_bit (components, regno);
26740 /* The link register. */
26741 if (bitmap_bit_p (in, LR_REGNO)
26742 || bitmap_bit_p (gen, LR_REGNO)
26743 || bitmap_bit_p (kill, LR_REGNO))
26744 bitmap_set_bit (components, 0);
26746 /* The TOC save. */
26747 if (bitmap_bit_p (in, TOC_REGNUM)
26748 || bitmap_bit_p (gen, TOC_REGNUM)
26749 || bitmap_bit_p (kill, TOC_REGNUM))
26750 bitmap_set_bit (components, 2);
26752 return components;
26755 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26756 static void
26757 rs6000_disqualify_components (sbitmap components, edge e,
26758 sbitmap edge_components, bool /*is_prologue*/)
26760 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26761 live where we want to place that code. */
26762 if (bitmap_bit_p (edge_components, 0)
26763 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26765 if (dump_file)
26766 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26767 "on entry to bb %d\n", e->dest->index);
26768 bitmap_clear_bit (components, 0);
26772 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26773 static void
26774 rs6000_emit_prologue_components (sbitmap components)
26776 rs6000_stack_t *info = rs6000_stack_info ();
26777 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26778 ? HARD_FRAME_POINTER_REGNUM
26779 : STACK_POINTER_REGNUM);
26781 machine_mode reg_mode = Pmode;
26782 int reg_size = TARGET_32BIT ? 4 : 8;
26783 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26784 ? DFmode : SFmode;
26785 int fp_reg_size = 8;
26787 /* Prologue for LR. */
26788 if (bitmap_bit_p (components, 0))
26790 rtx reg = gen_rtx_REG (reg_mode, 0);
26791 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26792 RTX_FRAME_RELATED_P (insn) = 1;
26793 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26795 int offset = info->lr_save_offset;
26796 if (info->push_p)
26797 offset += info->total_size;
26799 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26800 RTX_FRAME_RELATED_P (insn) = 1;
26801 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26802 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26803 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26806 /* Prologue for TOC. */
26807 if (bitmap_bit_p (components, 2))
26809 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26810 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26811 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26814 /* Prologue for the GPRs. */
26815 int offset = info->gp_save_offset;
26816 if (info->push_p)
26817 offset += info->total_size;
26819 for (int i = info->first_gp_reg_save; i < 32; i++)
26821 if (bitmap_bit_p (components, i))
26823 rtx reg = gen_rtx_REG (reg_mode, i);
26824 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26825 RTX_FRAME_RELATED_P (insn) = 1;
26826 rtx set = copy_rtx (single_set (insn));
26827 add_reg_note (insn, REG_CFA_OFFSET, set);
26830 offset += reg_size;
26833 /* Prologue for the FPRs. */
26834 offset = info->fp_save_offset;
26835 if (info->push_p)
26836 offset += info->total_size;
26838 for (int i = info->first_fp_reg_save; i < 64; i++)
26840 if (bitmap_bit_p (components, i))
26842 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26843 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26844 RTX_FRAME_RELATED_P (insn) = 1;
26845 rtx set = copy_rtx (single_set (insn));
26846 add_reg_note (insn, REG_CFA_OFFSET, set);
26849 offset += fp_reg_size;
26853 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26854 static void
26855 rs6000_emit_epilogue_components (sbitmap components)
26857 rs6000_stack_t *info = rs6000_stack_info ();
26858 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26859 ? HARD_FRAME_POINTER_REGNUM
26860 : STACK_POINTER_REGNUM);
26862 machine_mode reg_mode = Pmode;
26863 int reg_size = TARGET_32BIT ? 4 : 8;
26865 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26866 ? DFmode : SFmode;
26867 int fp_reg_size = 8;
26869 /* Epilogue for the FPRs. */
26870 int offset = info->fp_save_offset;
26871 if (info->push_p)
26872 offset += info->total_size;
26874 for (int i = info->first_fp_reg_save; i < 64; i++)
26876 if (bitmap_bit_p (components, i))
26878 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26879 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26880 RTX_FRAME_RELATED_P (insn) = 1;
26881 add_reg_note (insn, REG_CFA_RESTORE, reg);
26884 offset += fp_reg_size;
26887 /* Epilogue for the GPRs. */
26888 offset = info->gp_save_offset;
26889 if (info->push_p)
26890 offset += info->total_size;
26892 for (int i = info->first_gp_reg_save; i < 32; i++)
26894 if (bitmap_bit_p (components, i))
26896 rtx reg = gen_rtx_REG (reg_mode, i);
26897 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26898 RTX_FRAME_RELATED_P (insn) = 1;
26899 add_reg_note (insn, REG_CFA_RESTORE, reg);
26902 offset += reg_size;
26905 /* Epilogue for LR. */
26906 if (bitmap_bit_p (components, 0))
26908 int offset = info->lr_save_offset;
26909 if (info->push_p)
26910 offset += info->total_size;
26912 rtx reg = gen_rtx_REG (reg_mode, 0);
26913 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26915 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26916 insn = emit_move_insn (lr, reg);
26917 RTX_FRAME_RELATED_P (insn) = 1;
26918 add_reg_note (insn, REG_CFA_RESTORE, lr);
26922 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26923 static void
26924 rs6000_set_handled_components (sbitmap components)
26926 rs6000_stack_t *info = rs6000_stack_info ();
26928 for (int i = info->first_gp_reg_save; i < 32; i++)
26929 if (bitmap_bit_p (components, i))
26930 cfun->machine->gpr_is_wrapped_separately[i] = true;
26932 for (int i = info->first_fp_reg_save; i < 64; i++)
26933 if (bitmap_bit_p (components, i))
26934 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26936 if (bitmap_bit_p (components, 0))
26937 cfun->machine->lr_is_wrapped_separately = true;
26939 if (bitmap_bit_p (components, 2))
26940 cfun->machine->toc_is_wrapped_separately = true;
26943 /* VRSAVE is a bit vector representing which AltiVec registers
26944 are used. The OS uses this to determine which vector
26945 registers to save on a context switch. We need to save
26946 VRSAVE on the stack frame, add whatever AltiVec registers we
26947 used in this function, and do the corresponding magic in the
26948 epilogue. */
26949 static void
26950 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26951 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26953 /* Get VRSAVE into a GPR. */
26954 rtx reg = gen_rtx_REG (SImode, save_regno);
26955 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26956 if (TARGET_MACHO)
26957 emit_insn (gen_get_vrsave_internal (reg));
26958 else
26959 emit_insn (gen_rtx_SET (reg, vrsave));
26961 /* Save VRSAVE. */
26962 int offset = info->vrsave_save_offset + frame_off;
26963 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26965 /* Include the registers in the mask. */
26966 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26968 emit_insn (generate_set_vrsave (reg, info, 0));
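/* For reference, the mask ORed in above has one bit per AltiVec
   register, v0 in the most significant position (the layout
   ALTIVEC_REG_BIT produces and the layout the OS expects in the VRSAVE
   SPR); a function using only v20 and v21 would OR in 0x00000c00, say. */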
26971 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26972 called, it left the arg pointer to the old stack in r29. Otherwise, the
26973 arg pointer is the top of the current frame. */
26974 static void
26975 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26976 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26978 cfun->machine->split_stack_argp_used = true;
26980 if (sp_adjust)
26982 rtx r12 = gen_rtx_REG (Pmode, 12);
26983 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26984 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26985 emit_insn_before (set_r12, sp_adjust);
26987 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26989 rtx r12 = gen_rtx_REG (Pmode, 12);
26990 if (frame_off == 0)
26991 emit_move_insn (r12, frame_reg_rtx);
26992 else
26993 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26996 if (info->push_p)
26998 rtx r12 = gen_rtx_REG (Pmode, 12);
26999 rtx r29 = gen_rtx_REG (Pmode, 29);
27000 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27001 rtx not_more = gen_label_rtx ();
27002 rtx jump;
27004 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27005 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27006 gen_rtx_LABEL_REF (VOIDmode, not_more),
27007 pc_rtx);
27008 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27009 JUMP_LABEL (jump) = not_more;
27010 LABEL_NUSES (not_more) += 1;
27011 emit_move_insn (r12, r29);
27012 emit_label (not_more);
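/* Roughly, the sequence emitted above is

   bge 7,0f # cr7 set by __morestack: it was not called
   mr 12,29 # r29 holds the old-stack arg pointer
   0:

   so that r12 is a valid arg pointer on either path. */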
27016 /* Emit function prologue as insns. */
27018 void
27019 rs6000_emit_prologue (void)
27021 rs6000_stack_t *info = rs6000_stack_info ();
27022 machine_mode reg_mode = Pmode;
27023 int reg_size = TARGET_32BIT ? 4 : 8;
27024 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27025 ? DFmode : SFmode;
27026 int fp_reg_size = 8;
27027 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27028 rtx frame_reg_rtx = sp_reg_rtx;
27029 unsigned int cr_save_regno;
27030 rtx cr_save_rtx = NULL_RTX;
27031 rtx_insn *insn;
27032 int strategy;
27033 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27034 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27035 && call_used_regs[STATIC_CHAIN_REGNUM]);
27036 int using_split_stack = (flag_split_stack
27037 && (lookup_attribute ("no_split_stack",
27038 DECL_ATTRIBUTES (cfun->decl))
27039 == NULL));
27041 /* Offset to top of frame for frame_reg and sp respectively. */
27042 HOST_WIDE_INT frame_off = 0;
27043 HOST_WIDE_INT sp_off = 0;
27044 /* sp_adjust is the stack adjusting instruction, tracked so that the
27045 insn setting up the split-stack arg pointer can be emitted just
27046 prior to it, when r12 is not used here for other purposes. */
27047 rtx_insn *sp_adjust = 0;
27049 #if CHECKING_P
27050 /* Track and check usage of r0, r11, r12. */
27051 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27052 #define START_USE(R) do \
27054 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27055 reg_inuse |= 1 << (R); \
27056 } while (0)
27057 #define END_USE(R) do \
27059 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27060 reg_inuse &= ~(1 << (R)); \
27061 } while (0)
27062 #define NOT_INUSE(R) do \
27064 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27065 } while (0)
27066 #else
27067 #define START_USE(R) do {} while (0)
27068 #define END_USE(R) do {} while (0)
27069 #define NOT_INUSE(R) do {} while (0)
27070 #endif
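/* Usage pattern checked by these macros: the prologue code below
   brackets each live use of r0/r11/r12 with START_USE (R) ... END_USE (R)
   and calls NOT_INUSE (R) before clobbering R as a scratch, so any
   change that lets two uses of the same register overlap trips an
   assert in checking builds. */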
27072 if (DEFAULT_ABI == ABI_ELFv2
27073 && !TARGET_SINGLE_PIC_BASE)
27075 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27077 /* With -mminimal-toc we may generate an extra use of r2 below. */
27078 if (TARGET_TOC && TARGET_MINIMAL_TOC
27079 && !constant_pool_empty_p ())
27080 cfun->machine->r2_setup_needed = true;
27084 if (flag_stack_usage_info)
27085 current_function_static_stack_size = info->total_size;
27087 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27089 HOST_WIDE_INT size = info->total_size;
27091 if (crtl->is_leaf && !cfun->calls_alloca)
27093 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27094 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27095 size - get_stack_check_protect ());
27097 else if (size > 0)
27098 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27101 if (TARGET_FIX_AND_CONTINUE)
27103 /* gdb on darwin arranges to forward a function from the old
27104 address by modifying the first 5 instructions of the function
27105 to branch to the overriding function. This is necessary to
27106 permit function pointers that point to the old function to
27107 actually forward to the new function. */
27108 emit_insn (gen_nop ());
27109 emit_insn (gen_nop ());
27110 emit_insn (gen_nop ());
27111 emit_insn (gen_nop ());
27112 emit_insn (gen_nop ());
27115 /* Handle world saves specially here. */
27116 if (WORLD_SAVE_P (info))
27118 int i, j, sz;
27119 rtx treg;
27120 rtvec p;
27121 rtx reg0;
27123 /* save_world expects lr in r0. */
27124 reg0 = gen_rtx_REG (Pmode, 0);
27125 if (info->lr_save_p)
27127 insn = emit_move_insn (reg0,
27128 gen_rtx_REG (Pmode, LR_REGNO));
27129 RTX_FRAME_RELATED_P (insn) = 1;
27132 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27133 assumptions about the offsets of various bits of the stack
27134 frame. */
27135 gcc_assert (info->gp_save_offset == -220
27136 && info->fp_save_offset == -144
27137 && info->lr_save_offset == 8
27138 && info->cr_save_offset == 4
27139 && info->push_p
27140 && info->lr_save_p
27141 && (!crtl->calls_eh_return
27142 || info->ehrd_offset == -432)
27143 && info->vrsave_save_offset == -224
27144 && info->altivec_save_offset == -416);
27146 treg = gen_rtx_REG (SImode, 11);
27147 emit_move_insn (treg, GEN_INT (-info->total_size));
27149 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27150 in R11. It also clobbers R12, so beware! */
27152 /* Preserve CR2 for save_world prologues.  */
27153 sz = 5;
27154 sz += 32 - info->first_gp_reg_save;
27155 sz += 64 - info->first_fp_reg_save;
27156 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27157 p = rtvec_alloc (sz);
27158 j = 0;
27159 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
27160 gen_rtx_REG (SImode,
27161 LR_REGNO));
27162 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27163 gen_rtx_SYMBOL_REF (Pmode,
27164 "*save_world"));
27165 /* We do floats first so that the instruction pattern matches
27166 properly. */
27167 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27168 RTVEC_ELT (p, j++)
27169 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27170 ? DFmode : SFmode,
27171 info->first_fp_reg_save + i),
27172 frame_reg_rtx,
27173 info->fp_save_offset + frame_off + 8 * i);
27174 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27175 RTVEC_ELT (p, j++)
27176 = gen_frame_store (gen_rtx_REG (V4SImode,
27177 info->first_altivec_reg_save + i),
27178 frame_reg_rtx,
27179 info->altivec_save_offset + frame_off + 16 * i);
27180 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27181 RTVEC_ELT (p, j++)
27182 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27183 frame_reg_rtx,
27184 info->gp_save_offset + frame_off + reg_size * i);
27186 /* CR register traditionally saved as CR2. */
27187 RTVEC_ELT (p, j++)
27188 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27189 frame_reg_rtx, info->cr_save_offset + frame_off);
27190 /* Explain the use of R0. */
27191 if (info->lr_save_p)
27192 RTVEC_ELT (p, j++)
27193 = gen_frame_store (reg0,
27194 frame_reg_rtx, info->lr_save_offset + frame_off);
27195 /* Explain what happens to the stack pointer. */
27197 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27198 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27201 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27202 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27203 treg, GEN_INT (-info->total_size));
27204 sp_off = frame_off = info->total_size;
27207 strategy = info->savres_strategy;
27209 /* For V.4, update stack before we do any saving and set back pointer. */
27210 if (! WORLD_SAVE_P (info)
27211 && info->push_p
27212 && (DEFAULT_ABI == ABI_V4
27213 || crtl->calls_eh_return))
27215 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27216 || !(strategy & SAVE_INLINE_GPRS)
27217 || !(strategy & SAVE_INLINE_VRS));
27218 int ptr_regno = -1;
27219 rtx ptr_reg = NULL_RTX;
27220 int ptr_off = 0;
27222 if (info->total_size < 32767)
27223 frame_off = info->total_size;
27224 else if (need_r11)
27225 ptr_regno = 11;
27226 else if (info->cr_save_p
27227 || info->lr_save_p
27228 || info->first_fp_reg_save < 64
27229 || info->first_gp_reg_save < 32
27230 || info->altivec_size != 0
27231 || info->vrsave_size != 0
27232 || crtl->calls_eh_return)
27233 ptr_regno = 12;
27234 else
27236 /* The prologue won't be saving any regs so there is no need
27237 to set up a frame register to access any frame save area.
27238 We also won't be using frame_off anywhere below, but set
27239 the correct value anyway to protect against future
27240 changes to this function. */
27241 frame_off = info->total_size;
27243 if (ptr_regno != -1)
27245 /* Set up the frame offset to that needed by the first
27246 out-of-line save function. */
27247 START_USE (ptr_regno);
27248 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27249 frame_reg_rtx = ptr_reg;
27250 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27251 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27252 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27253 ptr_off = info->gp_save_offset + info->gp_size;
27254 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27255 ptr_off = info->altivec_save_offset + info->altivec_size;
27256 frame_off = -ptr_off;
27258 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27259 ptr_reg, ptr_off);
27260 if (REGNO (frame_reg_rtx) == 12)
27261 sp_adjust = 0;
27262 sp_off = info->total_size;
27263 if (frame_reg_rtx != sp_reg_rtx)
27264 rs6000_emit_stack_tie (frame_reg_rtx, false);
27267 /* If we use the link register, get it into r0. */
27268 if (!WORLD_SAVE_P (info) && info->lr_save_p
27269 && !cfun->machine->lr_is_wrapped_separately)
27271 rtx addr, reg, mem;
27273 reg = gen_rtx_REG (Pmode, 0);
27274 START_USE (0);
27275 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27276 RTX_FRAME_RELATED_P (insn) = 1;
27278 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27279 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27281 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27282 GEN_INT (info->lr_save_offset + frame_off));
27283 mem = gen_rtx_MEM (Pmode, addr);
27284 /* This store should not be in the rs6000_sr_alias_set alias set,
27285 because of __builtin_return_address. */
27287 insn = emit_move_insn (mem, reg);
27288 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27289 NULL_RTX, NULL_RTX);
27290 END_USE (0);
27294 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27295 r12 will be needed by out-of-line gpr restore. */
27296 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27297 && !(strategy & (SAVE_INLINE_GPRS
27298 | SAVE_NOINLINE_GPRS_SAVES_LR))
27299 ? 11 : 12);
27300 if (!WORLD_SAVE_P (info)
27301 && info->cr_save_p
27302 && REGNO (frame_reg_rtx) != cr_save_regno
27303 && !(using_static_chain_p && cr_save_regno == 11)
27304 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27306 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27307 START_USE (cr_save_regno);
27308 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27311 /* Do any required saving of fpr's. If only one or two to save, do
27312 it ourselves. Otherwise, call function. */
27313 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27315 int offset = info->fp_save_offset + frame_off;
27316 for (int i = info->first_fp_reg_save; i < 64; i++)
27318 if (save_reg_p (i)
27319 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27320 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27321 sp_off - frame_off);
27323 offset += fp_reg_size;
27326 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27328 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27329 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27330 unsigned ptr_regno = ptr_regno_for_savres (sel);
27331 rtx ptr_reg = frame_reg_rtx;
27333 if (REGNO (frame_reg_rtx) == ptr_regno)
27334 gcc_checking_assert (frame_off == 0);
27335 else
27337 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27338 NOT_INUSE (ptr_regno);
27339 emit_insn (gen_add3_insn (ptr_reg,
27340 frame_reg_rtx, GEN_INT (frame_off)));
27342 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27343 info->fp_save_offset,
27344 info->lr_save_offset,
27345 DFmode, sel);
27346 rs6000_frame_related (insn, ptr_reg, sp_off,
27347 NULL_RTX, NULL_RTX);
27348 if (lr)
27349 END_USE (0);
27352 /* Save GPRs. This is done as a PARALLEL if we are using
27353 the store-multiple instructions. */
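/* (Illustration: the PARALLEL built below can match the store-multiple
   pattern, e.g. "stmw 26,-24(1)" stores r26..r31 with one instruction.
   Store-multiple exists only in word mode, so presumably this strategy
   is only chosen for 32-bit targets.) */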
27354 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27356 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27357 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27358 unsigned ptr_regno = ptr_regno_for_savres (sel);
27359 rtx ptr_reg = frame_reg_rtx;
27360 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27361 int end_save = info->gp_save_offset + info->gp_size;
27362 int ptr_off;
27364 if (ptr_regno == 12)
27365 sp_adjust = 0;
27366 if (!ptr_set_up)
27367 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27369 /* Need to adjust r11 (r12) if we saved any FPRs. */
27370 if (end_save + frame_off != 0)
27372 rtx offset = GEN_INT (end_save + frame_off);
27374 if (ptr_set_up)
27375 frame_off = -end_save;
27376 else
27377 NOT_INUSE (ptr_regno);
27378 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27380 else if (!ptr_set_up)
27382 NOT_INUSE (ptr_regno);
27383 emit_move_insn (ptr_reg, frame_reg_rtx);
27385 ptr_off = -end_save;
27386 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27387 info->gp_save_offset + ptr_off,
27388 info->lr_save_offset + ptr_off,
27389 reg_mode, sel);
27390 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27391 NULL_RTX, NULL_RTX);
27392 if (lr)
27393 END_USE (0);
27395 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27397 rtvec p;
27398 int i;
27399 p = rtvec_alloc (32 - info->first_gp_reg_save);
27400 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27401 RTVEC_ELT (p, i)
27402 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27403 frame_reg_rtx,
27404 info->gp_save_offset + frame_off + reg_size * i);
27405 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27406 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27407 NULL_RTX, NULL_RTX);
27409 else if (!WORLD_SAVE_P (info))
27411 int offset = info->gp_save_offset + frame_off;
27412 for (int i = info->first_gp_reg_save; i < 32; i++)
27414 if (save_reg_p (i)
27415 && !cfun->machine->gpr_is_wrapped_separately[i])
27416 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27417 sp_off - frame_off);
27419 offset += reg_size;
27423 if (crtl->calls_eh_return)
27425 unsigned int i;
27426 rtvec p;
27428 for (i = 0; ; ++i)
27430 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27431 if (regno == INVALID_REGNUM)
27432 break;
27435 p = rtvec_alloc (i);
27437 for (i = 0; ; ++i)
27439 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27440 if (regno == INVALID_REGNUM)
27441 break;
27443 rtx set
27444 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27445 sp_reg_rtx,
27446 info->ehrd_offset + sp_off + reg_size * (int) i);
27447 RTVEC_ELT (p, i) = set;
27448 RTX_FRAME_RELATED_P (set) = 1;
27451 insn = emit_insn (gen_blockage ());
27452 RTX_FRAME_RELATED_P (insn) = 1;
27453 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
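/* On rs6000, EH_RETURN_DATA_REGNO (N) yields r3..r6 for N = 0..3 and
   INVALID_REGNUM after that, so the two loops above size and fill the
   vector for at most four exception data registers. */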
27456 /* In AIX ABI we need to make sure r2 is really saved. */
27457 if (TARGET_AIX && crtl->calls_eh_return)
27459 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27460 rtx join_insn, note;
27461 rtx_insn *save_insn;
27462 long toc_restore_insn;
27464 tmp_reg = gen_rtx_REG (Pmode, 11);
27465 tmp_reg_si = gen_rtx_REG (SImode, 11);
27466 if (using_static_chain_p)
27468 START_USE (0);
27469 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27471 else
27472 START_USE (11);
27473 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27474 /* Peek at instruction to which this function returns. If it's
27475 restoring r2, then we know we've already saved r2. We can't
27476 unconditionally save r2 because the value we have will already
27477 be updated if we arrived at this function via a plt call or
27478 toc adjusting stub. */
27479 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27480 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27481 + RS6000_TOC_SAVE_SLOT);
27482 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27483 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27484 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27485 validate_condition_mode (EQ, CCUNSmode);
27486 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27487 emit_insn (gen_rtx_SET (compare_result,
27488 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27489 toc_save_done = gen_label_rtx ();
27490 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27491 gen_rtx_EQ (VOIDmode, compare_result,
27492 const0_rtx),
27493 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27494 pc_rtx);
27495 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27496 JUMP_LABEL (jump) = toc_save_done;
27497 LABEL_NUSES (toc_save_done) += 1;
27499 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27500 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27501 sp_off - frame_off);
27503 emit_label (toc_save_done);
27505 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27506 have a CFG that has different saves along different paths.
27507 Move the note to a dummy blockage insn, which describes that
27508 R2 is unconditionally saved after the label. */
27509 /* ??? An alternate representation might be a special insn pattern
27510 containing both the branch and the store. That might give the
27511 code that minimizes the number of DW_CFA_advance opcodes more
27512 freedom in placing the annotations. */
27513 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27514 if (note)
27515 remove_note (save_insn, note);
27516 else
27517 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27518 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27519 RTX_FRAME_RELATED_P (save_insn) = 0;
27521 join_insn = emit_insn (gen_blockage ());
27522 REG_NOTES (join_insn) = note;
27523 RTX_FRAME_RELATED_P (join_insn) = 1;
27525 if (using_static_chain_p)
27527 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27528 END_USE (0);
27530 else
27531 END_USE (11);
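/* For reference: 0xE8410000 encodes "ld 2,0(1)" and 0x80410000 encodes
   "lwz 2,0(1)", so adding RS6000_TOC_SAVE_SLOT forms the exact
   r2-restoring instruction that a toc-adjusting stub leaves after the
   call; the xor/compare above matches the instruction at the return
   address against it. */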
27534 /* Save CR if we use any that must be preserved. */
27535 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27537 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27538 GEN_INT (info->cr_save_offset + frame_off));
27539 rtx mem = gen_frame_mem (SImode, addr);
27541 /* If we didn't copy cr before, do so now using r0. */
27542 if (cr_save_rtx == NULL_RTX)
27544 START_USE (0);
27545 cr_save_rtx = gen_rtx_REG (SImode, 0);
27546 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27549 /* Saving CR requires a two-instruction sequence: one instruction
27550 to move the CR to a general-purpose register, and a second
27551 instruction that stores the GPR to memory.
27553 We do not emit any DWARF CFI records for the first of these,
27554 because we cannot properly represent the fact that CR is saved in
27555 a register. One reason is that we cannot express that multiple
27556 CR fields are saved; another reason is that on 64-bit, the size
27557 of the CR register in DWARF (4 bytes) differs from the size of
27558 a general-purpose register.
27560 This means if any intervening instruction were to clobber one of
27561 the call-saved CR fields, we'd have incorrect CFI. To prevent
27562 this from happening, we mark the store to memory as a use of
27563 those CR fields, which prevents any such instruction from being
27564 scheduled in between the two instructions. */
27565 rtx crsave_v[9];
27566 int n_crsave = 0;
27567 int i;
27569 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27570 for (i = 0; i < 8; i++)
27571 if (save_reg_p (CR0_REGNO + i))
27572 crsave_v[n_crsave++]
27573 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27575 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27576 gen_rtvec_v (n_crsave, crsave_v)));
27577 END_USE (REGNO (cr_save_rtx));
27579 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27580 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27581 so we need to construct a frame expression manually. */
27582 RTX_FRAME_RELATED_P (insn) = 1;
27584 /* Update address to be stack-pointer relative, like
27585 rs6000_frame_related would do. */
27586 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27587 GEN_INT (info->cr_save_offset + sp_off));
27588 mem = gen_frame_mem (SImode, addr);
27590 if (DEFAULT_ABI == ABI_ELFv2)
27592 /* In the ELFv2 ABI we generate separate CFI records for each
27593 CR field that was actually saved. They all point to the
27594 same 32-bit stack slot. */
27595 rtx crframe[8];
27596 int n_crframe = 0;
27598 for (i = 0; i < 8; i++)
27599 if (save_reg_p (CR0_REGNO + i))
27601 crframe[n_crframe]
27602 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27604 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27605 n_crframe++;
27608 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27609 gen_rtx_PARALLEL (VOIDmode,
27610 gen_rtvec_v (n_crframe, crframe)));
27612 else
27614 /* In other ABIs, by convention, we use a single CR regnum to
27615 represent the fact that all call-saved CR fields are saved.
27616 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27617 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27618 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
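/* Illustration of the note attached in the non-ELFv2 case, assuming the
   CR slot is OFFSET bytes from the stack pointer:

   (set (mem:SI (plus:DI (reg:DI 1) (const_int OFFSET))) (reg:SI 70))

   where 70 is CR2's hard register number; the unwinder reads this one
   record as covering all call-saved CR fields. */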
27622 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27623 *separate* slots if the routine calls __builtin_eh_return, so
27624 that they can be independently restored by the unwinder. */
27625 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27627 int i, cr_off = info->ehcr_offset;
27628 rtx crsave;
27630 /* ??? We might get better performance by using multiple mfocrf
27631 instructions. */
27632 crsave = gen_rtx_REG (SImode, 0);
27633 emit_insn (gen_prologue_movesi_from_cr (crsave));
27635 for (i = 0; i < 8; i++)
27636 if (!call_used_regs[CR0_REGNO + i])
27638 rtvec p = rtvec_alloc (2);
27639 RTVEC_ELT (p, 0)
27640 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27641 RTVEC_ELT (p, 1)
27642 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27644 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27646 RTX_FRAME_RELATED_P (insn) = 1;
27647 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27648 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27649 sp_reg_rtx, cr_off + sp_off));
27651 cr_off += reg_size;
27655 /* If we are emitting stack probes, but allocate no stack, then
27656 just note that in the dump file. */
27657 if (flag_stack_clash_protection
27658 && dump_file
27659 && !info->push_p)
27660 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27662 /* Update stack and set back pointer unless this is V.4,
27663 for which it was done previously. */
27664 if (!WORLD_SAVE_P (info) && info->push_p
27665 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27667 rtx ptr_reg = NULL;
27668 int ptr_off = 0;
27670 /* If saving altivec regs we need to be able to address all save
27671 locations using a 16-bit offset. */
27672 if ((strategy & SAVE_INLINE_VRS) == 0
27673 || (info->altivec_size != 0
27674 && (info->altivec_save_offset + info->altivec_size - 16
27675 + info->total_size - frame_off) > 32767)
27676 || (info->vrsave_size != 0
27677 && (info->vrsave_save_offset
27678 + info->total_size - frame_off) > 32767))
27680 int sel = SAVRES_SAVE | SAVRES_VR;
27681 unsigned ptr_regno = ptr_regno_for_savres (sel);
27683 if (using_static_chain_p
27684 && ptr_regno == STATIC_CHAIN_REGNUM)
27685 ptr_regno = 12;
27686 if (REGNO (frame_reg_rtx) != ptr_regno)
27687 START_USE (ptr_regno);
27688 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27689 frame_reg_rtx = ptr_reg;
27690 ptr_off = info->altivec_save_offset + info->altivec_size;
27691 frame_off = -ptr_off;
27693 else if (REGNO (frame_reg_rtx) == 1)
27694 frame_off = info->total_size;
27695 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27696 ptr_reg, ptr_off);
27697 if (REGNO (frame_reg_rtx) == 12)
27698 sp_adjust = 0;
27699 sp_off = info->total_size;
27700 if (frame_reg_rtx != sp_reg_rtx)
27701 rs6000_emit_stack_tie (frame_reg_rtx, false);
27704 /* Set frame pointer, if needed. */
27705 if (frame_pointer_needed)
27707 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27708 sp_reg_rtx);
27709 RTX_FRAME_RELATED_P (insn) = 1;
27712 /* Save AltiVec registers if needed. Save here because the red zone does
27713 not always include AltiVec registers. */
27714 if (!WORLD_SAVE_P (info)
27715 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27717 int end_save = info->altivec_save_offset + info->altivec_size;
27718 int ptr_off;
27719 /* Oddly, the vector save/restore functions point r0 at the end
27720 of the save area, then use r11 or r12 to load offsets for
27721 [reg+reg] addressing. */
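/* A sketch of such a routine's body, entered with r0 pointing at the
   end of the save area (register choice and offsets illustrative only):

   li 11,-192
   stvx 20,11,0 # store v20 at r0-192
   li 11,-176
   stvx 21,11,0 # store v21 at r0-176
   ...

   which is why r11 or r12 is clobbered and r0 must be set up first. */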
27722 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27723 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27724 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27726 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27727 NOT_INUSE (0);
27728 if (scratch_regno == 12)
27729 sp_adjust = 0;
27730 if (end_save + frame_off != 0)
27732 rtx offset = GEN_INT (end_save + frame_off);
27734 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27736 else
27737 emit_move_insn (ptr_reg, frame_reg_rtx);
27739 ptr_off = -end_save;
27740 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27741 info->altivec_save_offset + ptr_off,
27742 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27743 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27744 NULL_RTX, NULL_RTX);
27745 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27747 /* The oddity mentioned above clobbered our frame reg. */
27748 emit_move_insn (frame_reg_rtx, ptr_reg);
27749 frame_off = ptr_off;
27752 else if (!WORLD_SAVE_P (info)
27753 && info->altivec_size != 0)
27755 int i;
27757 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27758 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27760 rtx areg, savereg, mem;
27761 HOST_WIDE_INT offset;
27763 offset = (info->altivec_save_offset + frame_off
27764 + 16 * (i - info->first_altivec_reg_save));
27766 savereg = gen_rtx_REG (V4SImode, i);
27768 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27770 mem = gen_frame_mem (V4SImode,
27771 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27772 GEN_INT (offset)));
27773 insn = emit_insn (gen_rtx_SET (mem, savereg));
27774 areg = NULL_RTX;
27776 else
27778 NOT_INUSE (0);
27779 areg = gen_rtx_REG (Pmode, 0);
27780 emit_move_insn (areg, GEN_INT (offset));
27782 /* AltiVec addressing mode is [reg+reg]. */
27783 mem = gen_frame_mem (V4SImode,
27784 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27786 /* Rather than emitting a generic move, force use of the stvx
27787 instruction, which we always want on ISA 2.07 (power8) systems.
27788 In particular we don't want xxpermdi/stxvd2x for little
27789 endian. */
27790 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27793 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27794 areg, GEN_INT (offset));
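/* quad_address_offset_p above accepts offsets that fit the DQ field of
   the ISA 3.0 lxv/stxv instructions: a signed 16-bit value that is also
   a multiple of 16. For such slots the "li areg,offset" of the
   [reg+reg] form is unnecessary. */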
27798 /* VRSAVE is a bit vector representing which AltiVec registers
27799 are used. The OS uses this to determine which vector
27800 registers to save on a context switch. We need to save
27801 VRSAVE on the stack frame, add whatever AltiVec registers we
27802 used in this function, and do the corresponding magic in the
27803 epilogue. */
27805 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27807 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27808 be using r12 as frame_reg_rtx and r11 as the static chain
27809 pointer for nested functions. */
27810 int save_regno = 12;
27811 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27812 && !using_static_chain_p)
27813 save_regno = 11;
27814 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27816 save_regno = 11;
27817 if (using_static_chain_p)
27818 save_regno = 0;
27820 NOT_INUSE (save_regno);
27822 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27825 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27826 if (!TARGET_SINGLE_PIC_BASE
27827 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27828 && !constant_pool_empty_p ())
27829 || (DEFAULT_ABI == ABI_V4
27830 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27831 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27833 /* If emit_load_toc_table will use the link register, we need to save
27834 it. We use R12 for this purpose because emit_load_toc_table
27835 can use register 0. This allows us to use a plain 'blr' to return
27836 from the procedure more often. */
27837 int save_LR_around_toc_setup = (TARGET_ELF
27838 && DEFAULT_ABI == ABI_V4
27839 && flag_pic
27840 && ! info->lr_save_p
27841 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27842 if (save_LR_around_toc_setup)
27844 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27845 rtx tmp = gen_rtx_REG (Pmode, 12);
27847 sp_adjust = 0;
27848 insn = emit_move_insn (tmp, lr);
27849 RTX_FRAME_RELATED_P (insn) = 1;
27851 rs6000_emit_load_toc_table (TRUE);
27853 insn = emit_move_insn (lr, tmp);
27854 add_reg_note (insn, REG_CFA_RESTORE, lr);
27855 RTX_FRAME_RELATED_P (insn) = 1;
27857 else
27858 rs6000_emit_load_toc_table (TRUE);
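/* (On V.4 with -fpic, rs6000_emit_load_toc_table typically obtains the
   PC via a "bcl 20,31,1f" that clobbers LR, which is why LR is parked
   in r12 above when it has no stack slot to be restored from.) */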
27861 #if TARGET_MACHO
27862 if (!TARGET_SINGLE_PIC_BASE
27863 && DEFAULT_ABI == ABI_DARWIN
27864 && flag_pic && crtl->uses_pic_offset_table)
27866 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27867 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27869 /* Save and restore LR locally around this call (in R0). */
27870 if (!info->lr_save_p)
27871 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27873 emit_insn (gen_load_macho_picbase (src));
27875 emit_move_insn (gen_rtx_REG (Pmode,
27876 RS6000_PIC_OFFSET_TABLE_REGNUM),
27877 lr);
27879 if (!info->lr_save_p)
27880 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27882 #endif
27884 /* If we need to, save the TOC register after doing the stack setup.
27885 Do not emit eh frame info for this save. The unwinder wants info,
27886 conceptually attached to instructions in this function, about
27887 register values in the caller of this function. This R2 may have
27888 already been changed from the value in the caller.
27889 We don't attempt to write accurate DWARF EH frame info for R2
27890 because code emitted by gcc for a (non-pointer) function call
27891 doesn't save and restore R2. Instead, R2 is managed out-of-line
27892 by a linker generated plt call stub when the function resides in
27893 a shared library. This behavior is costly to describe in DWARF,
27894 both in terms of the size of DWARF info and the time taken in the
27895 unwinder to interpret it. R2 changes, apart from the
27896 calls_eh_return case earlier in this function, are handled by
27897 linux-unwind.h frob_update_context. */
27898 if (rs6000_save_toc_in_prologue_p ()
27899 && !cfun->machine->toc_is_wrapped_separately)
27901 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27902 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27905 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27906 if (using_split_stack && split_stack_arg_pointer_used_p ())
27907 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27910 /* Output .extern statements for the save/restore routines we use. */
27912 static void
27913 rs6000_output_savres_externs (FILE *file)
27915 rs6000_stack_t *info = rs6000_stack_info ();
27917 if (TARGET_DEBUG_STACK)
27918 debug_stack_info (info);
27920 /* Write .extern for any function we will call to save and restore
27921 fp values. */
27922 if (info->first_fp_reg_save < 64
27923 && !TARGET_MACHO
27924 && !TARGET_ELF)
27926 char *name;
27927 int regno = info->first_fp_reg_save - 32;
27929 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27931 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27932 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27933 name = rs6000_savres_routine_name (regno, sel);
27934 fprintf (file, "\t.extern %s\n", name);
27936 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27938 bool lr = (info->savres_strategy
27939 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27940 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27941 name = rs6000_savres_routine_name (regno, sel);
27942 fprintf (file, "\t.extern %s\n", name);
27947 /* Write function prologue. */
27949 static void
27950 rs6000_output_function_prologue (FILE *file)
27952 if (!cfun->is_thunk)
27953 rs6000_output_savres_externs (file);
27955 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27956 immediately after the global entry point label. */
27957 if (rs6000_global_entry_point_needed_p ())
27959 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27961 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27963 if (TARGET_CMODEL != CMODEL_LARGE)
27965 /* In the small and medium code models, we assume the TOC is less
27966 than 2 GB away from the text section, so it can be computed via the
27967 following two-instruction sequence. */
27968 char buf[256];
27970 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27971 fprintf (file, "0:\taddis 2,12,.TOC.-");
27972 assemble_name (file, buf);
27973 fprintf (file, "@ha\n");
27974 fprintf (file, "\taddi 2,2,.TOC.-");
27975 assemble_name (file, buf);
27976 fprintf (file, "@l\n");
27978 else
27980 /* In the large code model, we allow arbitrary offsets between the
27981 TOC and the text section, so we have to load the offset from
27982 memory. The data field is emitted directly before the global
27983 entry point in rs6000_elf_declare_function_name. */
27984 char buf[256];
27986 #ifdef HAVE_AS_ENTRY_MARKERS
27987 /* If supported by the linker, emit a marker relocation. If the
27988 total code size of the final executable or shared library
27989 happens to fit into 2 GB after all, the linker will replace
27990 this code sequence with the sequence for the small or medium
27991 code model. */
27992 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27993 #endif
27994 fprintf (file, "\tld 2,");
27995 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27996 assemble_name (file, buf);
27997 fprintf (file, "-");
27998 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27999 assemble_name (file, buf);
28000 fprintf (file, "(12)\n");
28001 fprintf (file, "\tadd 2,2,12\n");
28004 fputs ("\t.localentry\t", file);
28005 assemble_name (file, name);
28006 fputs (",.-", file);
28007 assemble_name (file, name);
28008 fputs ("\n", file);
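/* Putting the fprintf pieces together, for a function FOO in the medium
   code model the emitted entry reads roughly:

   .LCF0:
   0: addis 2,12,.TOC.-.LCF0@ha
   addi 2,2,.TOC.-.LCF0@l
   .localentry FOO,.-FOO

   with r12 holding FOO's global entry address, per the ELFv2 ABI. */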
28011 /* Output -mprofile-kernel code. This needs to be done here instead of
28012 in output_function_profile since it must go after the ELFv2 ABI
28013 local entry point. */
28014 if (TARGET_PROFILE_KERNEL && crtl->profile)
28016 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
28017 gcc_assert (!TARGET_32BIT);
28019 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
28021 /* In the ELFv2 ABI we have no compiler stack word. It must be
28022 the responsibility of _mcount to preserve the static chain
28023 register if required. */
28024 if (DEFAULT_ABI != ABI_ELFv2
28025 && cfun->static_chain_decl != NULL)
28027 asm_fprintf (file, "\tstd %s,24(%s)\n",
28028 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28029 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28030 asm_fprintf (file, "\tld %s,24(%s)\n",
28031 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28033 else
28034 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28037 rs6000_pic_labelno++;
28040 /* -mprofile-kernel code calls mcount before the function prologue,
28041 so a profiled leaf function should stay a leaf function. */
28042 static bool
28043 rs6000_keep_leaf_when_profiled ()
28045 return TARGET_PROFILE_KERNEL;
28048 /* Non-zero if vmx regs are restored before the frame pop, zero if
28049 we restore after the pop when possible. */
28050 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
28052 /* Restoring cr is a two step process: loading a reg from the frame
28053 save, then moving the reg to cr. For ABI_V4 we must let the
28054 unwinder know that the stack location is no longer valid at or
28055 before the stack deallocation, but we can't emit a cfa_restore for
28056 cr at the stack deallocation like we do for other registers.
28057 The trouble is that it is possible for the move to cr to be
28058 scheduled after the stack deallocation. So say exactly where cr
28059 is located on each of the two insns. */
28061 static rtx
28062 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28064 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28065 rtx reg = gen_rtx_REG (SImode, regno);
28066 rtx_insn *insn = emit_move_insn (reg, mem);
28068 if (!exit_func && DEFAULT_ABI == ABI_V4)
28070 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28071 rtx set = gen_rtx_SET (reg, cr);
28073 add_reg_note (insn, REG_CFA_REGISTER, set);
28074 RTX_FRAME_RELATED_P (insn) = 1;
28076 return reg;
28079 /* Reload CR from REG. */
28081 static void
28082 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28084 int count = 0;
28085 int i;
28087 if (using_mfcr_multiple)
28089 for (i = 0; i < 8; i++)
28090 if (save_reg_p (CR0_REGNO + i))
28091 count++;
28092 gcc_assert (count);
28095 if (using_mfcr_multiple && count > 1)
28097 rtx_insn *insn;
28098 rtvec p;
28099 int ndx;
28101 p = rtvec_alloc (count);
28103 ndx = 0;
28104 for (i = 0; i < 8; i++)
28105 if (save_reg_p (CR0_REGNO + i))
28107 rtvec r = rtvec_alloc (2);
28108 RTVEC_ELT (r, 0) = reg;
28109 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28110 RTVEC_ELT (p, ndx) =
28111 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28112 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28113 ndx++;
28115 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28116 gcc_assert (ndx == count);
28118 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28119 CR field separately. */
28120 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28122 for (i = 0; i < 8; i++)
28123 if (save_reg_p (CR0_REGNO + i))
28124 add_reg_note (insn, REG_CFA_RESTORE,
28125 gen_rtx_REG (SImode, CR0_REGNO + i));
28127 RTX_FRAME_RELATED_P (insn) = 1;
28130 else
28131 for (i = 0; i < 8; i++)
28132 if (save_reg_p (CR0_REGNO + i))
28134 rtx insn = emit_insn (gen_movsi_to_cr_one
28135 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28137 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28138 CR field separately, attached to the insn that in fact
28139 restores this particular CR field. */
28140 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28142 add_reg_note (insn, REG_CFA_RESTORE,
28143 gen_rtx_REG (SImode, CR0_REGNO + i));
28145 RTX_FRAME_RELATED_P (insn) = 1;
28149 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28150 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28151 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28153 rtx_insn *insn = get_last_insn ();
28154 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28156 add_reg_note (insn, REG_CFA_RESTORE, cr);
28157 RTX_FRAME_RELATED_P (insn) = 1;
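/* The GEN_INT (1 << (7 - i)) masks above follow the mtcrf/mtocrf FXM
   encoding, where CR0 is the most significant field; the multiple-field
   path therefore assembles to something like "mtcrf 0x30,12" to refill
   CR2 and CR3 from one GPR. */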
28161 /* Like cr, the move to lr instruction can be scheduled after the
28162 stack deallocation, but unlike cr, its stack frame save is still
28163 valid. So we only need to emit the cfa_restore on the correct
28164 instruction. */
28166 static void
28167 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28169 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28170 rtx reg = gen_rtx_REG (Pmode, regno);
28172 emit_move_insn (reg, mem);
28175 static void
28176 restore_saved_lr (int regno, bool exit_func)
28178 rtx reg = gen_rtx_REG (Pmode, regno);
28179 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28180 rtx_insn *insn = emit_move_insn (lr, reg);
28182 if (!exit_func && flag_shrink_wrap)
28184 add_reg_note (insn, REG_CFA_RESTORE, lr);
28185 RTX_FRAME_RELATED_P (insn) = 1;
28189 static rtx
28190 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28192 if (DEFAULT_ABI == ABI_ELFv2)
28194 int i;
28195 for (i = 0; i < 8; i++)
28196 if (save_reg_p (CR0_REGNO + i))
28198 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28199 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28200 cfa_restores);
28203 else if (info->cr_save_p)
28204 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28205 gen_rtx_REG (SImode, CR2_REGNO),
28206 cfa_restores);
28208 if (info->lr_save_p)
28209 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28210 gen_rtx_REG (Pmode, LR_REGNO),
28211 cfa_restores);
28212 return cfa_restores;
28215 /* Return true if OFFSET from stack pointer can be clobbered by signals.
28216 V.4 doesn't have any stack cushion; the AIX ABIs guarantee 220 or 288 bytes
28217 below the stack pointer not clobbered by signals. */
28219 static inline bool
28220 offset_below_red_zone_p (HOST_WIDE_INT offset)
28222 return offset < (DEFAULT_ABI == ABI_V4
28223 ? 0
28224 : TARGET_32BIT ? -220 : -288);
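/* Example: with the 64-bit 288-byte cushion, an AltiVec save slot at
   offset -416 can be clobbered by a signal handler, so such slots must
   be reloaded before the stack pointer is popped; the epilogue below
   uses this predicate for exactly that decision. */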
28227 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28229 static void
28230 emit_cfa_restores (rtx cfa_restores)
28232 rtx_insn *insn = get_last_insn ();
28233 rtx *loc = &REG_NOTES (insn);
28235 while (*loc)
28236 loc = &XEXP (*loc, 1);
28237 *loc = cfa_restores;
28238 RTX_FRAME_RELATED_P (insn) = 1;
28241 /* Emit function epilogue as insns. */
28243 void
28244 rs6000_emit_epilogue (int sibcall)
28246 rs6000_stack_t *info;
28247 int restoring_GPRs_inline;
28248 int restoring_FPRs_inline;
28249 int using_load_multiple;
28250 int using_mtcr_multiple;
28251 int use_backchain_to_restore_sp;
28252 int restore_lr;
28253 int strategy;
28254 HOST_WIDE_INT frame_off = 0;
28255 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28256 rtx frame_reg_rtx = sp_reg_rtx;
28257 rtx cfa_restores = NULL_RTX;
28258 rtx insn;
28259 rtx cr_save_reg = NULL_RTX;
28260 machine_mode reg_mode = Pmode;
28261 int reg_size = TARGET_32BIT ? 4 : 8;
28262 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
28263 ? DFmode : SFmode;
28264 int fp_reg_size = 8;
28265 int i;
28266 bool exit_func;
28267 unsigned ptr_regno;
28269 info = rs6000_stack_info ();
28271 strategy = info->savres_strategy;
28272 using_load_multiple = strategy & REST_MULTIPLE;
28273 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28274 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28275 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28276 || rs6000_tune == PROCESSOR_PPC603
28277 || rs6000_tune == PROCESSOR_PPC750
28278 || optimize_size);
28279 /* Restore via the backchain when we have a large frame, since this
28280 is more efficient than an addis, addi pair. The second condition
28281 here will not trigger at the moment; we don't actually need a
28282 frame pointer for alloca, but the generic parts of the compiler
28283 give us one anyway. */
28284 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28285 ? info->lr_save_offset
28286 : 0) > 32767
28287 || (cfun->calls_alloca
28288 && !frame_pointer_needed));
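/* (A back-chain restore is the single instruction "ld 1,0(1)" -- by ABI
   convention the word at offset 0 of every frame holds the caller's
   stack pointer -- whereas a large constant adjustment needs the
   addis/addi pair built first.) */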
28289 restore_lr = (info->lr_save_p
28290 && (restoring_FPRs_inline
28291 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28292 && (restoring_GPRs_inline
28293 || info->first_fp_reg_save < 64)
28294 && !cfun->machine->lr_is_wrapped_separately);
28297 if (WORLD_SAVE_P (info))
28299 int i, j;
28300 char rname[30];
28301 const char *alloc_rname;
28302 rtvec p;
28304 /* eh_rest_world_r10 will return to the location saved in the LR
28305 stack slot (which is not likely to be our caller.)
28306 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28307 rest_world is similar, except any R10 parameter is ignored.
28308 The exception-handling stuff that was here in 2.95 is no
28309 longer necessary. */
28311 p = rtvec_alloc (9
28312 + 32 - info->first_gp_reg_save
28313 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28314 + 63 + 1 - info->first_fp_reg_save);
28316 strcpy (rname, ((crtl->calls_eh_return) ?
28317 "*eh_rest_world_r10" : "*rest_world"));
28318 alloc_rname = ggc_strdup (rname);
28320 j = 0;
28321 RTVEC_ELT (p, j++) = ret_rtx;
28322 RTVEC_ELT (p, j++)
28323 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28324 /* The instruction pattern requires a clobber here;
28325 it is shared with the restVEC helper. */
28326 RTVEC_ELT (p, j++)
28327 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28330 /* CR register traditionally saved as CR2. */
28331 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28332 RTVEC_ELT (p, j++)
28333 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28334 if (flag_shrink_wrap)
28336 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28337 gen_rtx_REG (Pmode, LR_REGNO),
28338 cfa_restores);
28339 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28343 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28345 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28346 RTVEC_ELT (p, j++)
28347 = gen_frame_load (reg,
28348 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28349 if (flag_shrink_wrap
28350 && save_reg_p (info->first_gp_reg_save + i))
28351 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28353 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28355 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28356 RTVEC_ELT (p, j++)
28357 = gen_frame_load (reg,
28358 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28359 if (flag_shrink_wrap
28360 && save_reg_p (info->first_altivec_reg_save + i))
28361 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28363 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28365 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28366 ? DFmode : SFmode),
28367 info->first_fp_reg_save + i);
28368 RTVEC_ELT (p, j++)
28369 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28370 if (flag_shrink_wrap
28371 && save_reg_p (info->first_fp_reg_save + i))
28372 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28374 RTVEC_ELT (p, j++)
28375 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28376 RTVEC_ELT (p, j++)
28377 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28378 RTVEC_ELT (p, j++)
28379 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28380 RTVEC_ELT (p, j++)
28381 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28382 RTVEC_ELT (p, j++)
28383 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28384 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28386 if (flag_shrink_wrap)
28388 REG_NOTES (insn) = cfa_restores;
28389 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28390 RTX_FRAME_RELATED_P (insn) = 1;
28392 return;
28395 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28396 if (info->push_p)
28397 frame_off = info->total_size;
28399 /* Restore AltiVec registers if we must do so before adjusting the
28400 stack. */
28401 if (info->altivec_size != 0
28402 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28403 || (DEFAULT_ABI != ABI_V4
28404 && offset_below_red_zone_p (info->altivec_save_offset))))
28406 int i;
28407 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28409 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28410 if (use_backchain_to_restore_sp)
28412 int frame_regno = 11;
28414 if ((strategy & REST_INLINE_VRS) == 0)
28416 /* Of r11 and r12, select the one not clobbered by an
28417 out-of-line restore function for the frame register. */
28418 frame_regno = 11 + 12 - scratch_regno;
28420 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28421 emit_move_insn (frame_reg_rtx,
28422 gen_rtx_MEM (Pmode, sp_reg_rtx));
28423 frame_off = 0;
28425 else if (frame_pointer_needed)
28426 frame_reg_rtx = hard_frame_pointer_rtx;
28428 if ((strategy & REST_INLINE_VRS) == 0)
28430 int end_save = info->altivec_save_offset + info->altivec_size;
28431 int ptr_off;
28432 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28433 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28435 if (end_save + frame_off != 0)
28437 rtx offset = GEN_INT (end_save + frame_off);
28439 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28441 else
28442 emit_move_insn (ptr_reg, frame_reg_rtx);
28444 ptr_off = -end_save;
28445 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28446 info->altivec_save_offset + ptr_off,
28447 0, V4SImode, SAVRES_VR);
28449 else
28451 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28452 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28454 rtx addr, areg, mem, insn;
28455 rtx reg = gen_rtx_REG (V4SImode, i);
28456 HOST_WIDE_INT offset
28457 = (info->altivec_save_offset + frame_off
28458 + 16 * (i - info->first_altivec_reg_save));
28460 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28462 mem = gen_frame_mem (V4SImode,
28463 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28464 GEN_INT (offset)));
28465 insn = gen_rtx_SET (reg, mem);
28467 else
28469 areg = gen_rtx_REG (Pmode, 0);
28470 emit_move_insn (areg, GEN_INT (offset));
28472 /* AltiVec addressing mode is [reg+reg]. */
28473 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28474 mem = gen_frame_mem (V4SImode, addr);
28476 /* Rather than emitting a generic move, force use of the
28477 lvx instruction, which we always want. In particular we
28478 don't want lxvd2x/xxpermdi for little endian. */
28479 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28482 (void) emit_insn (insn);
28486 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28487 if (((strategy & REST_INLINE_VRS) == 0
28488 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28489 && (flag_shrink_wrap
28490 || (offset_below_red_zone_p
28491 (info->altivec_save_offset
28492 + 16 * (i - info->first_altivec_reg_save))))
28493 && save_reg_p (i))
28495 rtx reg = gen_rtx_REG (V4SImode, i);
28496 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28500 /* Restore VRSAVE if we must do so before adjusting the stack. */
28501 if (info->vrsave_size != 0
28502 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28503 || (DEFAULT_ABI != ABI_V4
28504 && offset_below_red_zone_p (info->vrsave_save_offset))))
28506 rtx reg;
28508 if (frame_reg_rtx == sp_reg_rtx)
28510 if (use_backchain_to_restore_sp)
28512 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28513 emit_move_insn (frame_reg_rtx,
28514 gen_rtx_MEM (Pmode, sp_reg_rtx));
28515 frame_off = 0;
28517 else if (frame_pointer_needed)
28518 frame_reg_rtx = hard_frame_pointer_rtx;
28521 reg = gen_rtx_REG (SImode, 12);
28522 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28523 info->vrsave_save_offset + frame_off));
28525 emit_insn (generate_set_vrsave (reg, info, 1));
28528 insn = NULL_RTX;
28529 /* If we have a large stack frame, restore the old stack pointer
28530 using the backchain. */
28531 if (use_backchain_to_restore_sp)
28533 if (frame_reg_rtx == sp_reg_rtx)
28535 /* Under V.4, don't reset the stack pointer until after we're done
28536 loading the saved registers. */
28537 if (DEFAULT_ABI == ABI_V4)
28538 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28540 insn = emit_move_insn (frame_reg_rtx,
28541 gen_rtx_MEM (Pmode, sp_reg_rtx));
28542 frame_off = 0;
28544 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28545 && DEFAULT_ABI == ABI_V4)
28546 /* frame_reg_rtx has been set up by the altivec restore. */
28547 ;
28548 else
28550 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28551 frame_reg_rtx = sp_reg_rtx;
28554 /* If we have a frame pointer, we can restore the old stack pointer
28555 from it. */
28556 else if (frame_pointer_needed)
28558 frame_reg_rtx = sp_reg_rtx;
28559 if (DEFAULT_ABI == ABI_V4)
28560 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28561 /* Prevent reordering memory accesses against stack pointer restore. */
28562 else if (cfun->calls_alloca
28563 || offset_below_red_zone_p (-info->total_size))
28564 rs6000_emit_stack_tie (frame_reg_rtx, true);
28566 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28567 GEN_INT (info->total_size)));
28568 frame_off = 0;
28570 else if (info->push_p
28571 && DEFAULT_ABI != ABI_V4
28572 && !crtl->calls_eh_return)
28574 /* Prevent reordering memory accesses against stack pointer restore. */
28575 if (cfun->calls_alloca
28576 || offset_below_red_zone_p (-info->total_size))
28577 rs6000_emit_stack_tie (frame_reg_rtx, false);
28578 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28579 GEN_INT (info->total_size)));
28580 frame_off = 0;
28582 if (insn && frame_reg_rtx == sp_reg_rtx)
28584 if (cfa_restores)
28586 REG_NOTES (insn) = cfa_restores;
28587 cfa_restores = NULL_RTX;
28589 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28590 RTX_FRAME_RELATED_P (insn) = 1;
28593 /* Restore AltiVec registers if we have not done so already. */
28594 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28595 && info->altivec_size != 0
28596 && (DEFAULT_ABI == ABI_V4
28597 || !offset_below_red_zone_p (info->altivec_save_offset)))
28599 int i;
28601 if ((strategy & REST_INLINE_VRS) == 0)
28603 int end_save = info->altivec_save_offset + info->altivec_size;
28604 int ptr_off;
28605 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28606 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28607 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28609 if (end_save + frame_off != 0)
28611 rtx offset = GEN_INT (end_save + frame_off);
28613 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28615 else
28616 emit_move_insn (ptr_reg, frame_reg_rtx);
28618 ptr_off = -end_save;
28619 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28620 info->altivec_save_offset + ptr_off,
28621 0, V4SImode, SAVRES_VR);
28622 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28624 /* Frame reg was clobbered by out-of-line save. Restore it
28625 from ptr_reg, and if we are calling out-of-line gpr or
28626 fpr restore set up the correct pointer and offset. */
28627 unsigned newptr_regno = 1;
28628 if (!restoring_GPRs_inline)
28630 bool lr = info->gp_save_offset + info->gp_size == 0;
28631 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28632 newptr_regno = ptr_regno_for_savres (sel);
28633 end_save = info->gp_save_offset + info->gp_size;
28635 else if (!restoring_FPRs_inline)
28637 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28638 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28639 newptr_regno = ptr_regno_for_savres (sel);
28640 end_save = info->fp_save_offset + info->fp_size;
28643 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28644 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28646 if (end_save + ptr_off != 0)
28648 rtx offset = GEN_INT (end_save + ptr_off);
28650 frame_off = -end_save;
28651 if (TARGET_32BIT)
28652 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28653 ptr_reg, offset));
28654 else
28655 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28656 ptr_reg, offset));
28658 else
28660 frame_off = ptr_off;
28661 emit_move_insn (frame_reg_rtx, ptr_reg);
28665 else
28667 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28668 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28670 rtx addr, areg, mem, insn;
28671 rtx reg = gen_rtx_REG (V4SImode, i);
28672 HOST_WIDE_INT offset
28673 = (info->altivec_save_offset + frame_off
28674 + 16 * (i - info->first_altivec_reg_save));
28676 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28678 mem = gen_frame_mem (V4SImode,
28679 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28680 GEN_INT (offset)));
28681 insn = gen_rtx_SET (reg, mem);
28683 else
28685 areg = gen_rtx_REG (Pmode, 0);
28686 emit_move_insn (areg, GEN_INT (offset));
28688 /* AltiVec addressing mode is [reg+reg]. */
28689 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28690 mem = gen_frame_mem (V4SImode, addr);
28692 /* Rather than emitting a generic move, force use of the
28693 lvx instruction, which we always want. In particular we
28694 don't want lxvd2x/xxpermdi for little endian. */
28695 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28698 (void) emit_insn (insn);
28702 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28703 if (((strategy & REST_INLINE_VRS) == 0
28704 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28705 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28706 && save_reg_p (i))
28708 rtx reg = gen_rtx_REG (V4SImode, i);
28709 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28713 /* Restore VRSAVE if we have not done so already. */
28714 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28715 && info->vrsave_size != 0
28716 && (DEFAULT_ABI == ABI_V4
28717 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28719 rtx reg;
28721 reg = gen_rtx_REG (SImode, 12);
28722 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28723 info->vrsave_save_offset + frame_off));
28725 emit_insn (generate_set_vrsave (reg, info, 1));
28728 /* If we exit by an out-of-line restore function on ABI_V4 then that
28729 function will deallocate the stack, so we don't need to worry
28730 about the unwinder restoring cr from an invalid stack frame
28731 location. */
28732 exit_func = (!restoring_FPRs_inline
28733 || (!restoring_GPRs_inline
28734 && info->first_fp_reg_save == 64));
28736 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28737 *separate* slots if the routine calls __builtin_eh_return, so
28738 that they can be independently restored by the unwinder. */
28739 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28741 int i, cr_off = info->ehcr_offset;
28743 for (i = 0; i < 8; i++)
28744 if (!call_used_regs[CR0_REGNO + i])
28746 rtx reg = gen_rtx_REG (SImode, 0);
28747 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28748 cr_off + frame_off));
28750 insn = emit_insn (gen_movsi_to_cr_one
28751 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28753 if (!exit_func && flag_shrink_wrap)
28755 add_reg_note (insn, REG_CFA_RESTORE,
28756 gen_rtx_REG (SImode, CR0_REGNO + i));
28758 RTX_FRAME_RELATED_P (insn) = 1;
28761 cr_off += reg_size;
28765 /* Get the old lr if we saved it. If we are restoring registers
28766 out-of-line, then the out-of-line routines can do this for us. */
28767 if (restore_lr && restoring_GPRs_inline)
28768 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28770 /* Get the old cr if we saved it. */
28771 if (info->cr_save_p)
28773 unsigned cr_save_regno = 12;
28775 if (!restoring_GPRs_inline)
28777 /* Ensure we don't use the register used by the out-of-line
28778 gpr register restore below. */
28779 bool lr = info->gp_save_offset + info->gp_size == 0;
28780 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28781 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28783 if (gpr_ptr_regno == 12)
28784 cr_save_regno = 11;
28785 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28787 else if (REGNO (frame_reg_rtx) == 12)
28788 cr_save_regno = 11;
28790 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28791 info->cr_save_offset + frame_off,
28792 exit_func);
28795 /* Set LR here to try to overlap restores below. */
28796 if (restore_lr && restoring_GPRs_inline)
28797 restore_saved_lr (0, exit_func);
28799 /* Load exception handler data registers, if needed. */
28800 if (crtl->calls_eh_return)
28802 unsigned int i, regno;
28804 if (TARGET_AIX)
28806 rtx reg = gen_rtx_REG (reg_mode, 2);
28807 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28808 frame_off + RS6000_TOC_SAVE_SLOT));
28811 for (i = 0; ; ++i)
28813 rtx mem;
28815 regno = EH_RETURN_DATA_REGNO (i);
28816 if (regno == INVALID_REGNUM)
28817 break;
28819 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28820 info->ehrd_offset + frame_off
28821 + reg_size * (int) i);
28823 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28827 /* Restore GPRs. This is done as a PARALLEL if we are using
28828 the load-multiple instructions. */
28829 if (!restoring_GPRs_inline)
28831 /* We are jumping to an out-of-line function. */
28832 rtx ptr_reg;
28833 int end_save = info->gp_save_offset + info->gp_size;
28834 bool can_use_exit = end_save == 0;
28835 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28836 int ptr_off;
28838 /* Emit stack reset code if we need it. */
28839 ptr_regno = ptr_regno_for_savres (sel);
28840 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28841 if (can_use_exit)
28842 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28843 else if (end_save + frame_off != 0)
28844 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28845 GEN_INT (end_save + frame_off)));
28846 else if (REGNO (frame_reg_rtx) != ptr_regno)
28847 emit_move_insn (ptr_reg, frame_reg_rtx);
28848 if (REGNO (frame_reg_rtx) == ptr_regno)
28849 frame_off = -end_save;
28851 if (can_use_exit && info->cr_save_p)
28852 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28854 ptr_off = -end_save;
28855 rs6000_emit_savres_rtx (info, ptr_reg,
28856 info->gp_save_offset + ptr_off,
28857 info->lr_save_offset + ptr_off,
28858 reg_mode, sel);
28860 else if (using_load_multiple)
28862 rtvec p;
28863 p = rtvec_alloc (32 - info->first_gp_reg_save);
28864 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28865 RTVEC_ELT (p, i)
28866 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28867 frame_reg_rtx,
28868 info->gp_save_offset + frame_off + reg_size * i);
28869 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28871 else
28873 int offset = info->gp_save_offset + frame_off;
28874 for (i = info->first_gp_reg_save; i < 32; i++)
28876 if (save_reg_p (i)
28877 && !cfun->machine->gpr_is_wrapped_separately[i])
28879 rtx reg = gen_rtx_REG (reg_mode, i);
28880 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28883 offset += reg_size;
28887 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28889 /* If the frame pointer was used then we can't delay emitting
28890 a REG_CFA_DEF_CFA note. This must happen on the insn that
28891 restores the frame pointer, r31. We may have already emitted
28892 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28893 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28894 be harmless if emitted. */
28895 if (frame_pointer_needed)
28897 insn = get_last_insn ();
28898 add_reg_note (insn, REG_CFA_DEF_CFA,
28899 plus_constant (Pmode, frame_reg_rtx, frame_off));
28900 RTX_FRAME_RELATED_P (insn) = 1;
28903 /* Set up cfa_restores. We always need these when
28904 shrink-wrapping. If not shrink-wrapping then we only need
28905 the cfa_restore when the stack location is no longer valid.
28906 The cfa_restores must be emitted on or before the insn that
28907 invalidates the stack, and of course must not be emitted
28908 before the insn that actually does the restore. The latter
28909 is why it is a bad idea to emit the cfa_restores as a group
28910 on the last instruction here that actually does a restore:
28911 That insn may be reordered with respect to others doing
28912 restores. */
28913 if (flag_shrink_wrap
28914 && !restoring_GPRs_inline
28915 && info->first_fp_reg_save == 64)
28916 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28918 for (i = info->first_gp_reg_save; i < 32; i++)
28919 if (save_reg_p (i)
28920 && !cfun->machine->gpr_is_wrapped_separately[i])
28922 rtx reg = gen_rtx_REG (reg_mode, i);
28923 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28927 if (!restoring_GPRs_inline
28928 && info->first_fp_reg_save == 64)
28930 /* We are jumping to an out-of-line function. */
28931 if (cfa_restores)
28932 emit_cfa_restores (cfa_restores);
28933 return;
28936 if (restore_lr && !restoring_GPRs_inline)
28938 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28939 restore_saved_lr (0, exit_func);
28942 /* Restore fpr's if we need to do it without calling a function. */
28943 if (restoring_FPRs_inline)
28945 int offset = info->fp_save_offset + frame_off;
28946 for (i = info->first_fp_reg_save; i < 64; i++)
28948 if (save_reg_p (i)
28949 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28951 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28952 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28953 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28954 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28955 cfa_restores);
28958 offset += fp_reg_size;
28962 /* If we saved cr, restore it here. Just those that were used. */
28963 if (info->cr_save_p)
28964 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28966 /* If this is V.4, unwind the stack pointer after all of the loads
28967 have been done, or set up r11 if we are restoring fp out of line. */
28968 ptr_regno = 1;
28969 if (!restoring_FPRs_inline)
28971 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28972 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28973 ptr_regno = ptr_regno_for_savres (sel);
28976 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28977 if (REGNO (frame_reg_rtx) == ptr_regno)
28978 frame_off = 0;
28980 if (insn && restoring_FPRs_inline)
28982 if (cfa_restores)
28984 REG_NOTES (insn) = cfa_restores;
28985 cfa_restores = NULL_RTX;
28987 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28988 RTX_FRAME_RELATED_P (insn) = 1;
28991 if (crtl->calls_eh_return)
28993 rtx sa = EH_RETURN_STACKADJ_RTX;
28994 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28997 if (!sibcall && restoring_FPRs_inline)
28999 if (cfa_restores)
29001 /* We can't hang the cfa_restores off a simple return,
29002 since the shrink-wrap code sometimes uses an existing
29003 return. This means there might be a path from
29004 pre-prologue code to this return, and dwarf2cfi code
29005 wants the eh_frame unwinder state to be the same on
29006 all paths to any point. So we need to emit the
29007 cfa_restores before the return. For -m64 we really
29008 don't need epilogue cfa_restores at all, except for
29009 this irritating dwarf2cfi-with-shrink-wrap
29010 requirement; the stack red-zone means eh_frame info
29011 from the prologue telling the unwinder to restore
29012 from the stack is perfectly good right to the end of
29013 the function. */
29014 emit_insn (gen_blockage ());
29015 emit_cfa_restores (cfa_restores);
29016 cfa_restores = NULL_RTX;
29019 emit_jump_insn (targetm.gen_simple_return ());
29022 if (!sibcall && !restoring_FPRs_inline)
29024 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29025 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
29026 int elt = 0;
29027 RTVEC_ELT (p, elt++) = ret_rtx;
29028 if (lr)
29029 RTVEC_ELT (p, elt++)
29030 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29032 /* We have to restore more than two FP registers, so branch to the
29033 restore function. It will return to our caller. */
29034 int i;
29035 int reg;
29036 rtx sym;
29038 if (flag_shrink_wrap)
29039 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
29041 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
29042 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
29043 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
29044 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
29046 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29048 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
29050 RTVEC_ELT (p, elt++)
29051 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
29052 if (flag_shrink_wrap
29053 && save_reg_p (info->first_fp_reg_save + i))
29054 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29057 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
29060 if (cfa_restores)
29062 if (sibcall)
29063 /* Ensure the cfa_restores are hung off an insn that won't
29064 be reordered above other restores. */
29065 emit_insn (gen_blockage ());
29067 emit_cfa_restores (cfa_restores);
29071 /* Write function epilogue. */
29073 static void
29074 rs6000_output_function_epilogue (FILE *file)
29076 #if TARGET_MACHO
29077 macho_branch_islands ();
29080 rtx_insn *insn = get_last_insn ();
29081 rtx_insn *deleted_debug_label = NULL;
29083 /* Mach-O doesn't support labels at the end of objects, so if
29084 it looks like we might want one, take special action.
29086 First, collect any sequence of deleted debug labels. */
29087 while (insn
29088 && NOTE_P (insn)
29089 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29091 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
29092 a nop; instead set their CODE_LABEL_NUMBER to -1, otherwise
29093 there would be code generation differences
29094 between -g and -g0. */
29095 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29096 deleted_debug_label = insn;
29097 insn = PREV_INSN (insn);
29100 /* Second, if we have:
29101 label:
29102 barrier
29103 then this needs to be detected, so skip past the barrier. */
29105 if (insn && BARRIER_P (insn))
29106 insn = PREV_INSN (insn);
29108 /* Up to now we've only seen notes or barriers. */
29109 if (insn)
29111 if (LABEL_P (insn)
29112 || (NOTE_P (insn)
29113 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29114 /* Trailing label: <barrier>. */
29115 fputs ("\tnop\n", file);
29116 else
29118 /* Lastly, see if we have a completely empty function body. */
29119 while (insn && ! INSN_P (insn))
29120 insn = PREV_INSN (insn);
29121 /* If we don't find any insns, we've got an empty function body;
29122 i.e. completely empty, without a return or branch. This is
29123 taken as the case where a function body has been removed
29124 because it contains an inline __builtin_unreachable(). GCC
29125 states that reaching __builtin_unreachable() means UB so we're
29126 not obliged to do anything special; however, we want
29127 non-zero-sized function bodies. To meet this, and help the
29128 user out, let's trap the case. */
29129 if (insn == NULL)
29130 fputs ("\ttrap\n", file);
29133 else if (deleted_debug_label)
29134 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29135 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29136 CODE_LABEL_NUMBER (insn) = -1;
29138 #endif
29140 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29141 on its format.
29143 We don't output a traceback table if -finhibit-size-directive was
29144 used. The documentation for -finhibit-size-directive reads
29145 ``don't output a @code{.size} assembler directive, or anything
29146 else that would cause trouble if the function is split in the
29147 middle, and the two halves are placed at locations far apart in
29148 memory.'' The traceback table has this property, since it
29149 includes the offset from the start of the function to the
29150 traceback table itself.
29152 System V.4 PowerPC (and the embedded ABI derived from it) uses a
29153 different traceback table. */
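29153.1 /* For illustration only (a hypothetical function "foo", AIX-style
29153.2 names; the exact bytes depend on the ABI, the traceback options
29153.3 and the function's register usage), the code below emits assembly
29153.4 roughly of the form
29153.5 
29153.6 .long 0 # all-zero word marking the tbtab start
29153.7 .byte 0,0,... # format type 0, then language and bit fields
29153.8 .long LT..foo-.foo # offset from function entry to the table
29153.9 .short 3
29153.10 .byte "foo" # name length and name (optional fields)
29153.11 
29153.12 rather than using the .tbtab pseudo-op (see below for why). */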
29154 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29155 && ! flag_inhibit_size_directive
29156 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29158 const char *fname = NULL;
29159 const char *language_string = lang_hooks.name;
29160 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29161 int i;
29162 int optional_tbtab;
29163 rs6000_stack_t *info = rs6000_stack_info ();
29165 if (rs6000_traceback == traceback_full)
29166 optional_tbtab = 1;
29167 else if (rs6000_traceback == traceback_part)
29168 optional_tbtab = 0;
29169 else
29170 optional_tbtab = !optimize_size && !TARGET_ELF;
29172 if (optional_tbtab)
29174 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29175 while (*fname == '.') /* V.4 encodes . in the name */
29176 fname++;
29178 /* Need label immediately before tbtab, so we can compute
29179 its offset from the function start. */
29180 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29181 ASM_OUTPUT_LABEL (file, fname);
29184 /* The .tbtab pseudo-op can only be used for the first eight
29185 expressions, since it can't handle the possibly variable
29186 length fields that follow. However, if you omit the optional
29187 fields, the assembler outputs zeros for all optional fields
29188 anyways, giving each variable length field is minimum length
29189 (as defined in sys/debug.h). Thus we can not use the .tbtab
29190 pseudo-op at all. */
29192 /* An all-zero word flags the start of the tbtab, for debuggers
29193 that have to find it by searching forward from the entry
29194 point or from the current pc. */
29195 fputs ("\t.long 0\n", file);
29197 /* Tbtab format type. Use format type 0. */
29198 fputs ("\t.byte 0,", file);
29200 /* Language type. Unfortunately, there does not seem to be any
29201 official way to discover the language being compiled, so we
29202 use language_string.
29203 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
29204 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29205 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
29206 either, so for now use 0. */
29207 if (lang_GNU_C ()
29208 || ! strcmp (language_string, "GNU GIMPLE")
29209 || ! strcmp (language_string, "GNU Go")
29210 || ! strcmp (language_string, "libgccjit"))
29211 i = 0;
29212 else if (! strcmp (language_string, "GNU F77")
29213 || lang_GNU_Fortran ())
29214 i = 1;
29215 else if (! strcmp (language_string, "GNU Pascal"))
29216 i = 2;
29217 else if (! strcmp (language_string, "GNU Ada"))
29218 i = 3;
29219 else if (lang_GNU_CXX ()
29220 || ! strcmp (language_string, "GNU Objective-C++"))
29221 i = 9;
29222 else if (! strcmp (language_string, "GNU Java"))
29223 i = 13;
29224 else if (! strcmp (language_string, "GNU Objective-C"))
29225 i = 14;
29226 else
29227 gcc_unreachable ();
29228 fprintf (file, "%d,", i);
29230 /* 8 single bit fields: global linkage (not set for C extern linkage,
29231 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29232 from start of procedure stored in tbtab, internal function, function
29233 has controlled storage, function has no toc, function uses fp,
29234 function logs/aborts fp operations. */
29235 /* Assume that fp operations are used if any fp reg must be saved. */
29236 fprintf (file, "%d,",
29237 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
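29237.1 /* Worked example (illustrative): with a full traceback table
29237.2 (optional_tbtab == 1) and at least one FPR saved
29237.3 (first_fp_reg_save != 64), this emits (1 << 5) | (1 << 1) == 34. */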
29239 /* 6 bitfields: function is interrupt handler, name present in
29240 proc table, function calls alloca, on condition directives
29241 (controls stack walks, 3 bits), saves condition reg, saves
29242 link reg. */
29243 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29244 set up as a frame pointer, even when there is no alloca call. */
29245 fprintf (file, "%d,",
29246 ((optional_tbtab << 6)
29247 | ((optional_tbtab & frame_pointer_needed) << 5)
29248 | (info->cr_save_p << 1)
29249 | (info->lr_save_p)));
29251 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29252 (6 bits). */
29253 fprintf (file, "%d,",
29254 (info->push_p << 7) | (64 - info->first_fp_reg_save));
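29254.1 /* Worked example (illustrative): a frame that pushes the stack
29254.2 (push_p == 1) and saves 14 FPRs (first_fp_reg_save == 50) emits
29254.3 (1 << 7) | (64 - 50) == 142. */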
29256 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29257 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29259 if (optional_tbtab)
29261 /* Compute the parameter info from the function decl argument
29262 list. */
29263 tree decl;
29264 int next_parm_info_bit = 31;
29266 for (decl = DECL_ARGUMENTS (current_function_decl);
29267 decl; decl = DECL_CHAIN (decl))
29269 rtx parameter = DECL_INCOMING_RTL (decl);
29270 machine_mode mode = GET_MODE (parameter);
29272 if (GET_CODE (parameter) == REG)
29274 if (SCALAR_FLOAT_MODE_P (mode))
29276 int bits;
29278 float_parms++;
29280 switch (mode)
29282 case E_SFmode:
29283 case E_SDmode:
29284 bits = 0x2;
29285 break;
29287 case E_DFmode:
29288 case E_DDmode:
29289 case E_TFmode:
29290 case E_TDmode:
29291 case E_IFmode:
29292 case E_KFmode:
29293 bits = 0x3;
29294 break;
29296 default:
29297 gcc_unreachable ();
29300 /* If only one bit will fit, don't OR in this entry. */
29301 if (next_parm_info_bit > 0)
29302 parm_info |= (bits << (next_parm_info_bit - 1));
29303 next_parm_info_bit -= 2;
29305 else
29307 fixed_parms += ((GET_MODE_SIZE (mode)
29308 + (UNITS_PER_WORD - 1))
29309 / UNITS_PER_WORD);
29310 next_parm_info_bit -= 1;
29316 /* Number of fixed point parameters. */
29317 /* This is actually the number of words of fixed point parameters; thus
29318 an 8 byte struct counts as 2; and thus the maximum value is 8. */
29319 fprintf (file, "%d,", fixed_parms);
29321 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29322 all on stack. */
29323 /* This is actually the number of fp registers that hold parameters;
29324 and thus the maximum value is 13. */
29325 /* Set parameters on stack bit if parameters are not in their original
29326 registers, regardless of whether they are on the stack? Xlc
29327 seems to set the bit when not optimizing. */
29328 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29330 if (optional_tbtab)
29332 /* Optional fields follow. Some are variable length. */
29334 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29335 float, 11 double float. */
29336 /* There is an entry for each parameter in a register, in the order
29337 that they occur in the parameter list. Any intervening arguments
29338 on the stack are ignored. If the list overflows a long (max
29339 possible length 34 bits) then completely leave off all elements
29340 that don't fit. */
29341 /* Only emit this long if there was at least one parameter. */
29342 if (fixed_parms || float_parms)
29343 fprintf (file, "\t.long %d\n", parm_info);
29345 /* Offset from start of code to tb table. */
29346 fputs ("\t.long ", file);
29347 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29348 RS6000_OUTPUT_BASENAME (file, fname);
29349 putc ('-', file);
29350 rs6000_output_function_entry (file, fname);
29351 putc ('\n', file);
29353 /* Interrupt handler mask. */
29354 /* Omit this long, since we never set the interrupt handler bit
29355 above. */
29357 /* Number of CTL (controlled storage) anchors. */
29358 /* Omit this long, since the has_ctl bit is never set above. */
29360 /* Displacement into stack of each CTL anchor. */
29361 /* Omit this list of longs, because there are no CTL anchors. */
29363 /* Length of function name. */
29364 if (*fname == '*')
29365 ++fname;
29366 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29368 /* Function name. */
29369 assemble_string (fname, strlen (fname));
29371 /* Register for alloca automatic storage; this is always reg 31.
29372 Only emit this if the alloca bit was set above. */
29373 if (frame_pointer_needed)
29374 fputs ("\t.byte 31\n", file);
29376 fputs ("\t.align 2\n", file);
29380 /* Arrange to define .LCTOC1 label, if not already done. */
29381 if (need_toc_init)
29383 need_toc_init = 0;
29384 if (!toc_initialized)
29386 switch_to_section (toc_section);
29387 switch_to_section (current_function_section ());
29392 /* -fsplit-stack support. */
29394 /* A SYMBOL_REF for __morestack. */
29395 static GTY(()) rtx morestack_ref;
29397 static rtx
29398 gen_add3_const (rtx rt, rtx ra, long c)
29400 if (TARGET_64BIT)
29401 return gen_adddi3 (rt, ra, GEN_INT (c));
29402 else
29403 return gen_addsi3 (rt, ra, GEN_INT (c));
29406 /* Emit -fsplit-stack prologue, which goes before the regular function
29407 prologue (at local entry point in the case of ELFv2). */
29409 void
29410 rs6000_expand_split_stack_prologue (void)
29412 rs6000_stack_t *info = rs6000_stack_info ();
29413 unsigned HOST_WIDE_INT allocate;
29414 long alloc_hi, alloc_lo;
29415 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29416 rtx_insn *insn;
29418 gcc_assert (flag_split_stack && reload_completed);
29420 if (!info->push_p)
29421 return;
29423 if (global_regs[29])
29425 error ("%qs uses register r29", "-fsplit-stack");
29426 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29427 "conflicts with %qD", global_regs_decl[29]);
29430 allocate = info->total_size;
29431 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29433 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29434 return;
29436 if (morestack_ref == NULL_RTX)
29438 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29439 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29440 | SYMBOL_FLAG_FUNCTION);
29443 r0 = gen_rtx_REG (Pmode, 0);
29444 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29445 r12 = gen_rtx_REG (Pmode, 12);
29446 emit_insn (gen_load_split_stack_limit (r0));
29447 /* Always emit two insns here to calculate the requested stack,
29448 so that the linker can edit them when adjusting size for calling
29449 non-split-stack code. */
29450 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29451 alloc_lo = -allocate - alloc_hi;
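29451.1 /* Worked example (illustrative): for allocate == 0x12345 we get
29451.2 alloc_hi == -0x10000 and alloc_lo == -0x2345, so the two adds
29451.3 below compute r12 = r1 - 0x12345; alloc_hi is a multiple of
29451.4 0x10000 (an addis-style add) and alloc_lo fits in a signed
29451.5 16-bit immediate. */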
29452 if (alloc_hi != 0)
29454 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29455 if (alloc_lo != 0)
29456 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29457 else
29458 emit_insn (gen_nop ());
29460 else
29462 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29463 emit_insn (gen_nop ());
29466 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29467 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29468 ok_label = gen_label_rtx ();
29469 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29470 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29471 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29472 pc_rtx);
29473 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29474 JUMP_LABEL (insn) = ok_label;
29475 /* Mark the jump as very likely to be taken. */
29476 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29478 lr = gen_rtx_REG (Pmode, LR_REGNO);
29479 insn = emit_move_insn (r0, lr);
29480 RTX_FRAME_RELATED_P (insn) = 1;
29481 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29482 RTX_FRAME_RELATED_P (insn) = 1;
29484 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29485 const0_rtx, const0_rtx));
29486 call_fusage = NULL_RTX;
29487 use_reg (&call_fusage, r12);
29488 /* Say the call uses r0, even though it doesn't, to stop regrename
29489 from twiddling with the insns saving lr and trashing args for cfun.
29490 The insns restoring lr are similarly protected by making
29491 split_stack_return use r0. */
29492 use_reg (&call_fusage, r0);
29493 add_function_usage_to (insn, call_fusage);
29494 /* Indicate that this function can't jump to non-local gotos. */
29495 make_reg_eh_region_note_nothrow_nononlocal (insn);
29496 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29497 insn = emit_move_insn (lr, r0);
29498 add_reg_note (insn, REG_CFA_RESTORE, lr);
29499 RTX_FRAME_RELATED_P (insn) = 1;
29500 emit_insn (gen_split_stack_return ());
29502 emit_label (ok_label);
29503 LABEL_NUSES (ok_label) = 1;
29506 /* Return the internal arg pointer used for function incoming
29507 arguments. With -fsplit-stack, the arg pointer is r12, so we need
29508 to copy it to a pseudo in order for it to be preserved over calls
29509 and suchlike. We'd really like to use a pseudo here for the
29510 internal arg pointer but data-flow analysis is not prepared to
29511 accept pseudos as live at the beginning of a function. */
29513 static rtx
29514 rs6000_internal_arg_pointer (void)
29516 if (flag_split_stack
29517 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29518 == NULL))
29521 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29523 rtx pat;
29525 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29526 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29528 /* Put the pseudo initialization right after the note at the
29529 beginning of the function. */
29530 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29531 gen_rtx_REG (Pmode, 12));
29532 push_topmost_sequence ();
29533 emit_insn_after (pat, get_insns ());
29534 pop_topmost_sequence ();
29536 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29537 FIRST_PARM_OFFSET (current_function_decl));
29538 return copy_to_reg (ret);
29540 return virtual_incoming_args_rtx;
29543 /* We may have to tell the dataflow pass that the split stack prologue
29544 is initializing a register. */
29546 static void
29547 rs6000_live_on_entry (bitmap regs)
29549 if (flag_split_stack)
29550 bitmap_set_bit (regs, 12);
29553 /* Emit -fsplit-stack dynamic stack allocation space check. */
29555 void
29556 rs6000_split_stack_space_check (rtx size, rtx label)
29558 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29559 rtx limit = gen_reg_rtx (Pmode);
29560 rtx requested = gen_reg_rtx (Pmode);
29561 rtx cmp = gen_reg_rtx (CCUNSmode);
29562 rtx jump;
29564 emit_insn (gen_load_split_stack_limit (limit));
29565 if (CONST_INT_P (size))
29566 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29567 else
29569 size = force_reg (Pmode, size);
29570 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29572 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29573 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29574 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29575 gen_rtx_LABEL_REF (VOIDmode, label),
29576 pc_rtx);
29577 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29578 JUMP_LABEL (jump) = label;
29581 /* A C compound statement that outputs the assembler code for a thunk
29582 function, used to implement C++ virtual function calls with
29583 multiple inheritance. The thunk acts as a wrapper around a virtual
29584 function, adjusting the implicit object parameter before handing
29585 control off to the real function.
29587 First, emit code to add the integer DELTA to the location that
29588 contains the incoming first argument. Assume that this argument
29589 contains a pointer, and is the one used to pass the `this' pointer
29590 in C++. This is the incoming argument *before* the function
29591 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29592 values of all other incoming arguments.
29594 After the addition, emit code to jump to FUNCTION, which is a
29595 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29596 not touch the return address. Hence returning from FUNCTION will
29597 return to whoever called the current `thunk'.
29599 The effect must be as if FUNCTION had been called directly with the
29600 adjusted first argument. This macro is responsible for emitting
29601 all of the code for a thunk function; output_function_prologue()
29602 and output_function_epilogue() are not invoked.
29604 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29605 been extracted from it.) It might possibly be useful on some
29606 targets, but probably not.
29608 If you do not define this macro, the target-independent code in the
29609 C++ frontend will generate a less efficient heavyweight thunk that
29610 calls FUNCTION instead of jumping to it. The generic approach does
29611 not support varargs. */
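29611.1 /* A short C++ illustration (hypothetical types) of when such thunks
29611.2 arise:
29611.3 
29611.4 struct A { virtual void f (); int a; };
29611.5 struct B { virtual void g (); int b; };
29611.6 struct C : A, B { void g (); };
29611.7 
29611.8 Calling g through a B* that points at the B subobject of a C must
29611.9 first adjust `this' back to the start of C, so the B-in-C vtable
29611.10 points at a thunk with a negative DELTA that then jumps to C::g.
29611.11 VCALL_OFFSET is used when the adjustment involves a virtual base
29611.12 and must be loaded from the vtable at run time. */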
29613 static void
29614 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29615 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29616 tree function)
29618 rtx this_rtx, funexp;
29619 rtx_insn *insn;
29621 reload_completed = 1;
29622 epilogue_completed = 1;
29624 /* Mark the end of the (empty) prologue. */
29625 emit_note (NOTE_INSN_PROLOGUE_END);
29627 /* Find the "this" pointer. If the function returns a structure,
29628 the structure return pointer is in r3. */
29629 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29630 this_rtx = gen_rtx_REG (Pmode, 4);
29631 else
29632 this_rtx = gen_rtx_REG (Pmode, 3);
29634 /* Apply the constant offset, if required. */
29635 if (delta)
29636 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29638 /* Apply the offset from the vtable, if required. */
29639 if (vcall_offset)
29641 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29642 rtx tmp = gen_rtx_REG (Pmode, 12);
29644 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29645 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29647 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29648 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29650 else
29652 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29654 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29656 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29659 /* Generate a tail call to the target function. */
29660 if (!TREE_USED (function))
29662 assemble_external (function);
29663 TREE_USED (function) = 1;
29665 funexp = XEXP (DECL_RTL (function), 0);
29666 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29668 #if TARGET_MACHO
29669 if (MACHOPIC_INDIRECT)
29670 funexp = machopic_indirect_call_target (funexp);
29671 #endif
29673 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29674 generate sibcall RTL explicitly. */
29675 insn = emit_call_insn (
29676 gen_rtx_PARALLEL (VOIDmode,
29677 gen_rtvec (3,
29678 gen_rtx_CALL (VOIDmode,
29679 funexp, const0_rtx),
29680 gen_rtx_USE (VOIDmode, const0_rtx),
29681 simple_return_rtx)));
29682 SIBLING_CALL_P (insn) = 1;
29683 emit_barrier ();
29685 /* Run just enough of rest_of_compilation to get the insns emitted.
29686 There's not really enough bulk here to make other passes such as
29687 instruction scheduling worthwhile. Note that use_thunk calls
29688 assemble_start_function and assemble_end_function. */
29689 insn = get_insns ();
29690 shorten_branches (insn);
29691 final_start_function (insn, file, 1);
29692 final (insn, file, 1);
29693 final_end_function ();
29695 reload_completed = 0;
29696 epilogue_completed = 0;
29699 /* A quick summary of the various types of 'constant-pool tables'
29700 under PowerPC:
29702    Target       Flags            Name             One table per
29703    AIX          (none)           AIX TOC          object file
29704    AIX          -mfull-toc       AIX TOC          object file
29705    AIX          -mminimal-toc    AIX minimal TOC  translation unit
29706    SVR4/EABI    (none)           SVR4 SDATA       object file
29707    SVR4/EABI    -fpic            SVR4 pic         object file
29708    SVR4/EABI    -fPIC            SVR4 PIC         translation unit
29709    SVR4/EABI    -mrelocatable    EABI TOC         function
29710    SVR4/EABI    -maix            AIX TOC          object file
29711    SVR4/EABI    -maix -mminimal-toc
29712                                  AIX minimal TOC  translation unit
29714    Name             Reg.  Set by  entries        contains:
29715                                   made by  addrs?   fp?      sum?
29717    AIX TOC          2     crt0    as       Y        option   option
29718    AIX minimal TOC  30    prolog  gcc      Y        Y        option
29719    SVR4 SDATA       13    crt0    gcc      N        Y        N
29720    SVR4 pic         30    prolog  ld       Y        not yet  N
29721    SVR4 PIC         30    prolog  gcc      Y        option   option
29722    EABI TOC         30    prolog  gcc      Y        option   option
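 
   For illustration (not authoritative): on 64-bit AIX, output_toc
   below emits the double constant 1.0 as
 
   .tc FD_3ff00000_0[TC],0x3ff0000000000000
 
   deriving the entry name from the constant's bit pattern.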
29726 /* Hash functions for the hash table. */
29728 static unsigned
29729 rs6000_hash_constant (rtx k)
29731 enum rtx_code code = GET_CODE (k);
29732 machine_mode mode = GET_MODE (k);
29733 unsigned result = (code << 3) ^ mode;
29734 const char *format;
29735 int flen, fidx;
29737 format = GET_RTX_FORMAT (code);
29738 flen = strlen (format);
29739 fidx = 0;
29741 switch (code)
29743 case LABEL_REF:
29744 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29746 case CONST_WIDE_INT:
29748 int i;
29749 flen = CONST_WIDE_INT_NUNITS (k);
29750 for (i = 0; i < flen; i++)
29751 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29752 return result;
29755 case CONST_DOUBLE:
29756 if (mode != VOIDmode)
29757 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29758 flen = 2;
29759 break;
29761 case CODE_LABEL:
29762 fidx = 3;
29763 break;
29765 default:
29766 break;
29769 for (; fidx < flen; fidx++)
29770 switch (format[fidx])
29772 case 's':
29774 unsigned i, len;
29775 const char *str = XSTR (k, fidx);
29776 len = strlen (str);
29777 result = result * 613 + len;
29778 for (i = 0; i < len; i++)
29779 result = result * 613 + (unsigned) str[i];
29780 break;
29782 case 'u':
29783 case 'e':
29784 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29785 break;
29786 case 'i':
29787 case 'n':
29788 result = result * 613 + (unsigned) XINT (k, fidx);
29789 break;
29790 case 'w':
29791 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29792 result = result * 613 + (unsigned) XWINT (k, fidx);
29793 else
29795 size_t i;
29796 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29797 result = result * 613 + (unsigned) (XWINT (k, fidx)
29798 >> CHAR_BIT * i);
29800 break;
29801 case '0':
29802 break;
29803 default:
29804 gcc_unreachable ();
29807 return result;
29810 hashval_t
29811 toc_hasher::hash (toc_hash_struct *thc)
29813 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29816 /* Compare H1 and H2 for equivalence. */
29818 bool
29819 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29821 rtx r1 = h1->key;
29822 rtx r2 = h2->key;
29824 if (h1->key_mode != h2->key_mode)
29825 return 0;
29827 return rtx_equal_p (r1, r2);
29830 /* These are the names given by the C++ front-end to vtables, and
29831 vtable-like objects. Ideally, this logic should not be here;
29832 instead, there should be some programmatic way of inquiring as
29833 to whether or not an object is a vtable. */
29835 #define VTABLE_NAME_P(NAME) \
29836 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
29837 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
29838 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
29839 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
29840 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
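29840.1 /* Example (Itanium C++ ABI mangling): "_ZTV4Base" names the vtable
29840.2 for a class Base and "_ZTI4Base" its typeinfo; the "_vt." form
29840.3 comes from the older GNU v2 C++ mangling. */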
29842 #ifdef NO_DOLLAR_IN_LABEL
29843 /* Return a GGC-allocated character string translating dollar signs in
29844 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
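29844.1 /* E.g. "foo$bar$baz" becomes "foo_bar_baz"; a name with no '$', or
29844.2 one starting with '$', is returned unchanged. */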
29846 const char *
29847 rs6000_xcoff_strip_dollar (const char *name)
29849 char *strip, *p;
29850 const char *q;
29851 size_t len;
29853 q = (const char *) strchr (name, '$');
29855 if (q == 0 || q == name)
29856 return name;
29858 len = strlen (name);
29859 strip = XALLOCAVEC (char, len + 1);
29860 strcpy (strip, name);
29861 p = strip + (q - name);
29862 while (p)
29864 *p = '_';
29865 p = strchr (p + 1, '$');
29868 return ggc_alloc_string (strip, len);
29870 #endif
29872 void
29873 rs6000_output_symbol_ref (FILE *file, rtx x)
29875 const char *name = XSTR (x, 0);
29877 /* Currently C++ toc references to vtables can be emitted before it
29878 is decided whether the vtable is public or private. If this is
29879 the case, then the linker will eventually complain that there is
29880 a reference to an unknown section. Thus, for vtables only,
29881 we emit the TOC reference to reference the identifier and not the
29882 symbol. */
29883 if (VTABLE_NAME_P (name))
29885 RS6000_OUTPUT_BASENAME (file, name);
29887 else
29888 assemble_name (file, name);
29891 /* Output a TOC entry. We derive the entry name from what is being
29892 written. */
29894 void
29895 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29897 char buf[256];
29898 const char *name = buf;
29899 rtx base = x;
29900 HOST_WIDE_INT offset = 0;
29902 gcc_assert (!TARGET_NO_TOC);
29904 /* When the linker won't eliminate them, don't output duplicate
29905 TOC entries (this happens on AIX if there is any kind of TOC,
29906 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29907 CODE_LABELs. */
29908 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29910 struct toc_hash_struct *h;
29912 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29913 time because GGC is not initialized at that point. */
29914 if (toc_hash_table == NULL)
29915 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29917 h = ggc_alloc<toc_hash_struct> ();
29918 h->key = x;
29919 h->key_mode = mode;
29920 h->labelno = labelno;
29922 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29923 if (*found == NULL)
29924 *found = h;
29925 else /* This is indeed a duplicate.
29926 Set this label equal to that label. */
29928 fputs ("\t.set ", file);
29929 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29930 fprintf (file, "%d,", labelno);
29931 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29932 fprintf (file, "%d\n", ((*found)->labelno));
29934 #ifdef HAVE_AS_TLS
29935 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29936 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29937 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29939 fputs ("\t.set ", file);
29940 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29941 fprintf (file, "%d,", labelno);
29942 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29943 fprintf (file, "%d\n", ((*found)->labelno));
29945 #endif
29946 return;
29950 /* If we're going to put a double constant in the TOC, make sure it's
29951 aligned properly when strict alignment is on. */
29952 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29953 && STRICT_ALIGNMENT
29954 && GET_MODE_BITSIZE (mode) >= 64
29955 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29956 ASM_OUTPUT_ALIGN (file, 3);
29959 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29961 /* Handle FP constants specially. Note that if we have a minimal
29962 TOC, things we put here aren't actually in the TOC, so we can allow
29963 FP constants. */
29964 if (GET_CODE (x) == CONST_DOUBLE
29965 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29966 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29968 long k[4];
29970 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29971 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29972 else
29973 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29975 if (TARGET_64BIT)
29977 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29978 fputs (DOUBLE_INT_ASM_OP, file);
29979 else
29980 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29981 k[0] & 0xffffffff, k[1] & 0xffffffff,
29982 k[2] & 0xffffffff, k[3] & 0xffffffff);
29983 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29984 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29985 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29986 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29987 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29988 return;
29990 else
29992 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29993 fputs ("\t.long ", file);
29994 else
29995 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29996 k[0] & 0xffffffff, k[1] & 0xffffffff,
29997 k[2] & 0xffffffff, k[3] & 0xffffffff);
29998 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29999 k[0] & 0xffffffff, k[1] & 0xffffffff,
30000 k[2] & 0xffffffff, k[3] & 0xffffffff);
30001 return;
30004 else if (GET_CODE (x) == CONST_DOUBLE
30005 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
30007 long k[2];
30009 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30010 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
30011 else
30012 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
30014 if (TARGET_64BIT)
30016 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30017 fputs (DOUBLE_INT_ASM_OP, file);
30018 else
30019 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30020 k[0] & 0xffffffff, k[1] & 0xffffffff);
30021 fprintf (file, "0x%lx%08lx\n",
30022 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
30023 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
30024 return;
30026 else
30028 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30029 fputs ("\t.long ", file);
30030 else
30031 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30032 k[0] & 0xffffffff, k[1] & 0xffffffff);
30033 fprintf (file, "0x%lx,0x%lx\n",
30034 k[0] & 0xffffffff, k[1] & 0xffffffff);
30035 return;
30038 else if (GET_CODE (x) == CONST_DOUBLE
30039 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
30041 long l;
30043 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30044 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
30045 else
30046 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
30048 if (TARGET_64BIT)
30050 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30051 fputs (DOUBLE_INT_ASM_OP, file);
30052 else
30053 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30054 if (WORDS_BIG_ENDIAN)
30055 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
30056 else
30057 fprintf (file, "0x%lx\n", l & 0xffffffff);
30058 return;
30060 else
30062 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30063 fputs ("\t.long ", file);
30064 else
30065 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30066 fprintf (file, "0x%lx\n", l & 0xffffffff);
30067 return;
30070 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
30072 unsigned HOST_WIDE_INT low;
30073 HOST_WIDE_INT high;
30075 low = INTVAL (x) & 0xffffffff;
30076 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30078 /* TOC entries are always Pmode-sized, so when big-endian,
30079 smaller integer constants in the TOC need to be padded.
30080 (This is still a win over putting the constants in
30081 a separate constant pool, because then we'd have
30082 to have both a TOC entry _and_ the actual constant.)
30084 For a 32-bit target, CONST_INT values are loaded and shifted
30085 entirely within `low' and can be stored in one TOC entry. */
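30085.1 /* E.g. (illustrative) an SImode constant 0x1234 on a 64-bit
30085.2 big-endian target is shifted into the high half below: low becomes
30085.3 0, high becomes 0x1234, and the entry is emitted as the doubleword
30085.4 0x123400000000. */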
30087 /* It would be easy to make this work, but it doesn't now. */
30088 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30090 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30092 low |= high << 32;
30093 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30094 high = (HOST_WIDE_INT) low >> 32;
30095 low &= 0xffffffff;
30098 if (TARGET_64BIT)
30100 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30101 fputs (DOUBLE_INT_ASM_OP, file);
30102 else
30103 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30104 (long) high & 0xffffffff, (long) low & 0xffffffff);
30105 fprintf (file, "0x%lx%08lx\n",
30106 (long) high & 0xffffffff, (long) low & 0xffffffff);
30107 return;
30109 else
30111 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30113 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30114 fputs ("\t.long ", file);
30115 else
30116 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30117 (long) high & 0xffffffff, (long) low & 0xffffffff);
30118 fprintf (file, "0x%lx,0x%lx\n",
30119 (long) high & 0xffffffff, (long) low & 0xffffffff);
30121 else
30123 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30124 fputs ("\t.long ", file);
30125 else
30126 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30127 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30129 return;
30133 if (GET_CODE (x) == CONST)
30135 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30136 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
30138 base = XEXP (XEXP (x, 0), 0);
30139 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30142 switch (GET_CODE (base))
30144 case SYMBOL_REF:
30145 name = XSTR (base, 0);
30146 break;
30148 case LABEL_REF:
30149 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30150 CODE_LABEL_NUMBER (XEXP (base, 0)));
30151 break;
30153 case CODE_LABEL:
30154 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30155 break;
30157 default:
30158 gcc_unreachable ();
30161 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30162 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30163 else
30165 fputs ("\t.tc ", file);
30166 RS6000_OUTPUT_BASENAME (file, name);
30168 if (offset < 0)
30169 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30170 else if (offset)
30171 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30173 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30174 after other TOC symbols, reducing overflow of small TOC access
30175 to [TC] symbols. */
30176 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30177 ? "[TE]," : "[TC],", file);
30180 /* Currently C++ toc references to vtables can be emitted before it
30181 is decided whether the vtable is public or private. If this is
30182 the case, then the linker will eventually complain that there is
30183 a TOC reference to an unknown section. Thus, for vtables only,
30184 we emit the TOC reference to reference the symbol and not the
30185 section. */
30186 if (VTABLE_NAME_P (name))
30188 RS6000_OUTPUT_BASENAME (file, name);
30189 if (offset < 0)
30190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30191 else if (offset > 0)
30192 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30194 else
30195 output_addr_const (file, x);
30197 #if HAVE_AS_TLS
30198 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30200 switch (SYMBOL_REF_TLS_MODEL (base))
30202 case 0:
30203 break;
30204 case TLS_MODEL_LOCAL_EXEC:
30205 fputs ("@le", file);
30206 break;
30207 case TLS_MODEL_INITIAL_EXEC:
30208 fputs ("@ie", file);
30209 break;
30210 /* Use global-dynamic for local-dynamic. */
30211 case TLS_MODEL_GLOBAL_DYNAMIC:
30212 case TLS_MODEL_LOCAL_DYNAMIC:
30213 putc ('\n', file);
30214 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30215 fputs ("\t.tc .", file);
30216 RS6000_OUTPUT_BASENAME (file, name);
30217 fputs ("[TC],", file);
30218 output_addr_const (file, x);
30219 fputs ("@m", file);
30220 break;
30221 default:
30222 gcc_unreachable ();
30225 #endif
30227 putc ('\n', file);
30230 /* Output an assembler pseudo-op to write an ASCII string of N characters
30231 starting at P to FILE.
30233 On the RS/6000, we have to do this using the .byte operation and
30234 write out special characters outside the quoted string.
30235 Also, the assembler is broken; very long strings are truncated,
30236 so we must artificially break them up early. */
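30236.1 /* E.g. (illustrative) for the 13 input bytes `He said "hi"' plus a
30236.2 newline this emits
30236.3 
30236.4 .byte "He said ""hi"""
30236.5 .byte 10
30236.6 
30236.7 doubling the embedded quote characters and falling back to decimal
30236.8 for the unprintable byte. */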
30238 void
30239 output_ascii (FILE *file, const char *p, int n)
30241 char c;
30242 int i, count_string;
30243 const char *for_string = "\t.byte \"";
30244 const char *for_decimal = "\t.byte ";
30245 const char *to_close = NULL;
30247 count_string = 0;
30248 for (i = 0; i < n; i++)
30250 c = *p++;
30251 if (c >= ' ' && c < 0177)
30253 if (for_string)
30254 fputs (for_string, file);
30255 putc (c, file);
30257 /* Write two quotes to get one. */
30258 if (c == '"')
30260 putc (c, file);
30261 ++count_string;
30264 for_string = NULL;
30265 for_decimal = "\"\n\t.byte ";
30266 to_close = "\"\n";
30267 ++count_string;
30269 if (count_string >= 512)
30271 fputs (to_close, file);
30273 for_string = "\t.byte \"";
30274 for_decimal = "\t.byte ";
30275 to_close = NULL;
30276 count_string = 0;
30279 else
30281 if (for_decimal)
30282 fputs (for_decimal, file);
30283 fprintf (file, "%d", c);
30285 for_string = "\n\t.byte \"";
30286 for_decimal = ", ";
30287 to_close = "\n";
30288 count_string = 0;
30292 /* Now close the string if we have written one. Then end the line. */
30293 if (to_close)
30294 fputs (to_close, file);
30297 /* Generate a unique section name for FILENAME for a section type
30298 represented by SECTION_DESC. Output goes into BUF.
30300 SECTION_DESC can be any string, as long as it is different for each
30301 possible section type.
30303 We name the section in the same manner as xlc. The name begins with an
30304 underscore followed by the filename (after stripping any leading directory
30305 names) with the last period replaced by the string SECTION_DESC. If
30306 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30307 the name. */
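30307.1 /* E.g. (illustrative): FILENAME "src/foo.c" with SECTION_DESC
30307.2 ".bss_" (as used by the XCOFF code elsewhere in this file) yields
30307.3 "_foo.bss_". */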
30309 void
30310 rs6000_gen_section_name (char **buf, const char *filename,
30311 const char *section_desc)
30313 const char *q, *after_last_slash, *last_period = 0;
30314 char *p;
30315 int len;
30317 after_last_slash = filename;
30318 for (q = filename; *q; q++)
30320 if (*q == '/')
30321 after_last_slash = q + 1;
30322 else if (*q == '.')
30323 last_period = q;
30326 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30327 *buf = (char *) xmalloc (len);
30329 p = *buf;
30330 *p++ = '_';
30332 for (q = after_last_slash; *q; q++)
30334 if (q == last_period)
30336 strcpy (p, section_desc);
30337 p += strlen (section_desc);
30338 break;
30341 else if (ISALNUM (*q))
30342 *p++ = *q;
30345 if (last_period == 0)
30346 strcpy (p, section_desc);
30347 else
30348 *p = '\0';
30351 /* Emit profile function. */
30353 void
30354 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30356 /* Non-standard profiling for kernels, which just saves LR then calls
30357 _mcount without worrying about arg saves. The idea is to change
30358 the function prologue as little as possible as it isn't easy to
30359 account for arg save/restore code added just for _mcount. */
30360 if (TARGET_PROFILE_KERNEL)
30361 return;
30363 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30365 #ifndef NO_PROFILE_COUNTERS
30366 # define NO_PROFILE_COUNTERS 0
30367 #endif
30368 if (NO_PROFILE_COUNTERS)
30369 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30370 LCT_NORMAL, VOIDmode);
30371 else
30373 char buf[30];
30374 const char *label_name;
30375 rtx fun;
30377 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30378 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30379 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30381 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30382 LCT_NORMAL, VOIDmode, fun, Pmode);
30385 else if (DEFAULT_ABI == ABI_DARWIN)
30387 const char *mcount_name = RS6000_MCOUNT;
30388 int caller_addr_regno = LR_REGNO;
30390 /* Be conservative and always set this, at least for now. */
30391 crtl->uses_pic_offset_table = 1;
30393 #if TARGET_MACHO
30394 /* For PIC code, set up a stub and collect the caller's address
30395 from r0, which is where the prologue puts it. */
30396 if (MACHOPIC_INDIRECT
30397 && crtl->uses_pic_offset_table)
30398 caller_addr_regno = 0;
30399 #endif
30400 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30401 LCT_NORMAL, VOIDmode,
30402 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30406 /* Write function profiler code. */
30408 void
30409 output_function_profiler (FILE *file, int labelno)
30411 char buf[100];
30413 switch (DEFAULT_ABI)
30415 default:
30416 gcc_unreachable ();
30418 case ABI_V4:
30419 if (!TARGET_32BIT)
30421 warning (0, "no profiling of 64-bit code for this ABI");
30422 return;
30424 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30425 fprintf (file, "\tmflr %s\n", reg_names[0]);
30426 if (NO_PROFILE_COUNTERS)
30428 asm_fprintf (file, "\tstw %s,4(%s)\n",
30429 reg_names[0], reg_names[1]);
30431 else if (TARGET_SECURE_PLT && flag_pic)
30433 if (TARGET_LINK_STACK)
30435 char name[32];
30436 get_ppc476_thunk_name (name);
30437 asm_fprintf (file, "\tbl %s\n", name);
30439 else
30440 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30441 asm_fprintf (file, "\tstw %s,4(%s)\n",
30442 reg_names[0], reg_names[1]);
30443 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30444 asm_fprintf (file, "\taddis %s,%s,",
30445 reg_names[12], reg_names[12]);
30446 assemble_name (file, buf);
30447 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30448 assemble_name (file, buf);
30449 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30451 else if (flag_pic == 1)
30453 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30454 asm_fprintf (file, "\tstw %s,4(%s)\n",
30455 reg_names[0], reg_names[1]);
30456 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30457 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30458 assemble_name (file, buf);
30459 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30461 else if (flag_pic > 1)
30463 asm_fprintf (file, "\tstw %s,4(%s)\n",
30464 reg_names[0], reg_names[1]);
30465 /* Now, we need to get the address of the label. */
30466 if (TARGET_LINK_STACK)
30468 char name[32];
30469 get_ppc476_thunk_name (name);
30470 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30471 assemble_name (file, buf);
30472 fputs ("-.\n1:", file);
30473 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30474 asm_fprintf (file, "\taddi %s,%s,4\n",
30475 reg_names[11], reg_names[11]);
30477 else
30479 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30480 assemble_name (file, buf);
30481 fputs ("-.\n1:", file);
30482 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30484 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30485 reg_names[0], reg_names[11]);
30486 asm_fprintf (file, "\tadd %s,%s,%s\n",
30487 reg_names[0], reg_names[0], reg_names[11]);
30489 else
30491 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30492 assemble_name (file, buf);
30493 fputs ("@ha\n", file);
30494 asm_fprintf (file, "\tstw %s,4(%s)\n",
30495 reg_names[0], reg_names[1]);
30496 asm_fprintf (file, "\tla %s,", reg_names[0]);
30497 assemble_name (file, buf);
30498 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30501 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30502 fprintf (file, "\tbl %s%s\n",
30503 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30504 break;
30506 case ABI_AIX:
30507 case ABI_ELFv2:
30508 case ABI_DARWIN:
30509 /* Don't do anything, done in output_profile_hook (). */
30510 break;
30516 /* The following variable holds the last issued insn. */
30518 static rtx_insn *last_scheduled_insn;
30520 /* The following variable helps to balance issuing of load and
30521 store instructions. */
30523 static int load_store_pendulum;
30525 /* The following variable helps pair divide insns during scheduling. */
30526 static int divide_cnt;
30527 /* The following variable helps pair and alternate vector and vector load
30528 insns during scheduling. */
30529 static int vec_pairing;
/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue_1 (rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If the insn has no reservation but we got this far, leave the
     issue count unchanged.  */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
        cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
        cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
        cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
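
/* As an illustration of the accounting above: on a grouping target
   (Power4/5) with MORE == 4 slots left, a cracked insn consumes two issue
   slots (4 -> 2), a microcoded insn closes the group entirely (-> 0), and
   any other recognized insn consumes one slot (4 -> 3).  */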
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
                    unsigned int)
{
  enum attr_type attr_type;

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  switch (dep_type)
    {
    case REG_DEP_TRUE:
      {
        /* Data dependency; DEP_INSN writes a register that INSN reads
           some cycles later.  */

        /* Separate a load from a narrower, dependent store.  */
        if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
            && GET_CODE (PATTERN (insn)) == SET
            && GET_CODE (PATTERN (dep_insn)) == SET
            && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
            && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
            && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
                > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
          return cost + 14;

        attr_type = get_attr_type (insn);

        switch (attr_type)
          {
          case TYPE_JMPREG:
            /* Tell the first scheduling pass about the latency between
               a mtctr and bctr (and mtlr and br/blr).  The first
               scheduling pass will not know about this latency since
               the mtctr instruction, which has the latency associated
               to it, will be generated by reload.  */
            return 4;
          case TYPE_BRANCH:
            /* Leave some extra cycles between a compare and its
               dependent branch, to inhibit expensive mispredicts.  */
            if ((rs6000_tune == PROCESSOR_PPC603
                 || rs6000_tune == PROCESSOR_PPC604
                 || rs6000_tune == PROCESSOR_PPC604e
                 || rs6000_tune == PROCESSOR_PPC620
                 || rs6000_tune == PROCESSOR_PPC630
                 || rs6000_tune == PROCESSOR_PPC750
                 || rs6000_tune == PROCESSOR_PPC7400
                 || rs6000_tune == PROCESSOR_PPC7450
                 || rs6000_tune == PROCESSOR_PPCE5500
                 || rs6000_tune == PROCESSOR_PPCE6500
                 || rs6000_tune == PROCESSOR_POWER4
                 || rs6000_tune == PROCESSOR_POWER5
                 || rs6000_tune == PROCESSOR_POWER7
                 || rs6000_tune == PROCESSOR_POWER8
                 || rs6000_tune == PROCESSOR_POWER9
                 || rs6000_tune == PROCESSOR_CELL)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))

              switch (get_attr_type (dep_insn))
                {
                case TYPE_CMP:
                case TYPE_FPCOMPARE:
                case TYPE_CR_LOGICAL:
                  return cost + 2;
                case TYPE_EXTS:
                case TYPE_MUL:
                  if (get_attr_dot (dep_insn) == DOT_YES)
                    return cost + 2;
                  else
                    break;
                case TYPE_SHIFT:
                  if (get_attr_dot (dep_insn) == DOT_YES
                      && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
                    return cost + 2;
                  else
                    break;
                default:
                  break;
                }
            break;

          case TYPE_STORE:
          case TYPE_FPSTORE:
            if ((rs6000_tune == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))
              {
                if (GET_CODE (PATTERN (insn)) != SET)
                  /* If this happens, we have to extend this to schedule
                     optimally.  Return default for now.  */
                  return cost;

                /* Adjust the cost for the case where the value written
                   by a fixed point operation is used as the address
                   gen value on a store.  */
                switch (get_attr_type (dep_insn))
                  {
                  case TYPE_LOAD:
                  case TYPE_CNTLZ:
                    {
                      if (! rs6000_store_data_bypass_p (dep_insn, insn))
                        return get_attr_sign_extend (dep_insn)
                               == SIGN_EXTEND_YES ? 6 : 4;
                      break;
                    }
                  case TYPE_SHIFT:
                    {
                      if (! rs6000_store_data_bypass_p (dep_insn, insn))
                        return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
                               ? 6 : 3;
                      break;
                    }
                  case TYPE_INTEGER:
                  case TYPE_ADD:
                  case TYPE_LOGICAL:
                  case TYPE_EXTS:
                  case TYPE_INSERT:
                    {
                      if (! rs6000_store_data_bypass_p (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_STORE:
                  case TYPE_FPLOAD:
                  case TYPE_FPSTORE:
                    {
                      if (get_attr_update (dep_insn) == UPDATE_YES
                          && ! rs6000_store_data_bypass_p (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_MUL:
                    {
                      if (! rs6000_store_data_bypass_p (dep_insn, insn))
                        return 17;
                      break;
                    }
                  case TYPE_DIV:
                    {
                      if (! rs6000_store_data_bypass_p (dep_insn, insn))
                        return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
                      break;
                    }
                  default:
                    break;
                  }
              }
            break;

          case TYPE_LOAD:
            if ((rs6000_tune == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))
              {
                /* Adjust the cost for the case where the value written
                   by a fixed point instruction is used within the address
                   gen portion of a subsequent load(u)(x).  */
                switch (get_attr_type (dep_insn))
                  {
                  case TYPE_LOAD:
                  case TYPE_CNTLZ:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return get_attr_sign_extend (dep_insn)
                               == SIGN_EXTEND_YES ? 6 : 4;
                      break;
                    }
                  case TYPE_SHIFT:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
                               ? 6 : 3;
                      break;
                    }
                  case TYPE_INTEGER:
                  case TYPE_ADD:
                  case TYPE_LOGICAL:
                  case TYPE_EXTS:
                  case TYPE_INSERT:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_STORE:
                  case TYPE_FPLOAD:
                  case TYPE_FPSTORE:
                    {
                      if (get_attr_update (dep_insn) == UPDATE_YES
                          && set_to_load_agen (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_MUL:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 17;
                      break;
                    }
                  case TYPE_DIV:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
                      break;
                    }
                  default:
                    break;
                  }
              }
            break;

          case TYPE_FPLOAD:
            if ((rs6000_tune == PROCESSOR_POWER6)
                && get_attr_update (insn) == UPDATE_NO
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0)
                && (get_attr_type (dep_insn) == TYPE_MFFGPR))
              return 2;

          default:
            break;
          }

        /* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
         cycles later.  */
      if ((rs6000_tune == PROCESSOR_POWER6)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))
        {
          attr_type = get_attr_type (insn);

          switch (attr_type)
            {
            case TYPE_FP:
            case TYPE_FPSIMPLE:
              if (get_attr_type (dep_insn) == TYPE_FP
                  || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
                return 1;
              break;
            case TYPE_FPLOAD:
              if (get_attr_update (insn) == UPDATE_NO
                  && get_attr_type (dep_insn) == TYPE_MFFGPR)
                return 2;
              break;
            default:
              break;
            }
        }
      /* Fall through, no cost for output dependency.  */
      /* FALLTHRU */

    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
         cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
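
/* For example, on Power6 a store whose address register is produced by a
   dependent multiply is charged 17 cycles unless the store-data bypass
   applies, and a dependent 32-bit divide feeding address generation costs
   45 cycles (57 for the 64-bit form), per the tables above.  */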
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
                          int cost, unsigned int dw)
{
  int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);

  if (ret != cost)
    {
      const char *dep;

      switch (dep_type)
        {
        default:             dep = "unknown dependency"; break;
        case REG_DEP_TRUE:   dep = "data dependency";    break;
        case REG_DEP_OUTPUT: dep = "output dependency";  break;
        case REG_DEP_ANTI:   dep = "anti dependency";    break;
        }

      fprintf (stderr,
               "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
               "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* Return true if INSN is microcoded, false otherwise.  */

static bool
is_microcoded_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_tune == PROCESSOR_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups
      && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
           && get_attr_update (insn) == UPDATE_YES
           && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
          || ((type == TYPE_LOAD || type == TYPE_STORE)
              && get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
          || type == TYPE_MFCR)
        return true;
    }

  return false;
}
/* Return true if INSN is cracked into 2 instructions by the processor
   (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups
      && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
           && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
           && get_attr_update (insn) == UPDATE_NO)
          || (type == TYPE_LOAD
              && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
              && get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_NO)
          || (type == TYPE_STORE
              && get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_NO)
          || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
              && get_attr_update (insn) == UPDATE_YES)
          || (type == TYPE_CR_LOGICAL
              && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
          || (type == TYPE_EXTS
              && get_attr_dot (insn) == DOT_YES)
          || (type == TYPE_SHIFT
              && get_attr_dot (insn) == DOT_YES
              && get_attr_var_shift (insn) == VAR_SHIFT_NO)
          || (type == TYPE_MUL
              && get_attr_dot (insn) == DOT_YES)
          || type == TYPE_DIV
          || (type == TYPE_INSERT
              && get_attr_size (insn) == SIZE_32))
        return true;
    }

  return false;
}
/* Return true if INSN can be issued only from the branch slot.  */

static bool
is_branch_slot_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
        return true;
      return false;
    }

  return false;
}
/* Return true if OUT_INSN sets a value that is used in the address
   generation computation of IN_INSN.  */
static bool
set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both insns are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
        return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
                  HOST_WIDE_INT *size)
{
  rtx addr_rtx;
  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);
  else
    return false;

  addr_rtx = (XEXP (mem, 0));
  if (GET_CODE (addr_rtx) == PRE_MODIFY)
    addr_rtx = XEXP (addr_rtx, 1);

  *offset = 0;
  while (GET_CODE (addr_rtx) == PLUS
         && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *offset += INTVAL (XEXP (addr_rtx, 1));
      addr_rtx = XEXP (addr_rtx, 0);
    }
  if (!REG_P (addr_rtx))
    return false;

  *base = addr_rtx;
  return true;
}
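
/* For example, a memory reference of the form
     (mem:DI (plus:DI (reg:DI 9) (const_int 16)))
   with a known 8-byte size decomposes into BASE = r9, OFFSET = 16 and
   SIZE = 8, while an indexed (reg+reg) address makes this fail.  */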
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
            && ((off1 + size1 == off2)
                || (off2 + size2 == off1)));

  return false;
}
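
/* E.g. a 4-byte store to 16(r9) and a 4-byte store to 20(r9) are adjacent,
   since the first ends (offset 16 + size 4) exactly where the second
   begins; the same base register is required.  */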
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
            && (((off1 <= off2) && (off1 + size1 > off2))
                || ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
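
/* E.g. an 8-byte store to 16(r9) overlaps a 4-byte load from 20(r9),
   because 16 <= 20 and 16 + 8 > 20.  */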
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_tune) {
  case PROCESSOR_PPC750:
    switch (get_attr_type (insn))
      {
      default:
        break;

      case TYPE_MUL:
      case TYPE_DIV:
        fprintf (stderr, "priority was %#x (%d) before adjustment\n",
                 priority, priority);
        if (priority >= 0 && priority < 0x01000000)
          priority >>= 3;
        break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
         dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
        /* Attach highest priority to insn.  This means that in
           haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
           precede 'priority' (critical path) considerations.  */
        return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
        /* Increase priority of insn by a minimal amount.  This means that in
           haifa-sched.c:ready_sort(), only 'priority' (critical path)
           considerations precede dispatch-slot restriction considerations.  */
        return (priority + 1);
    }

  if (rs6000_tune == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
          || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this
       instruction is a store.  Power6 wants loads and stores scheduled
       alternately when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx_insn *insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_MUL
      || type == TYPE_DIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_tune) {
  case PROCESSOR_RS64A:
  case PROCESSOR_PPC601: /* ? */
  case PROCESSOR_PPC7450:
    return 3;
  case PROCESSOR_PPC440:
  case PROCESSOR_PPC603:
  case PROCESSOR_PPC750:
  case PROCESSOR_PPC7400:
  case PROCESSOR_PPC8540:
  case PROCESSOR_PPC8548:
  case PROCESSOR_CELL:
  case PROCESSOR_PPCE300C2:
  case PROCESSOR_PPCE300C3:
  case PROCESSOR_PPCE500MC:
  case PROCESSOR_PPCE500MC64:
  case PROCESSOR_PPCE5500:
  case PROCESSOR_PPCE6500:
  case PROCESSOR_TITAN:
    return 2;
  case PROCESSOR_PPC476:
  case PROCESSOR_PPC604:
  case PROCESSOR_PPC604e:
  case PROCESSOR_PPC620:
  case PROCESSOR_PPC630:
    return 4;
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
  case PROCESSOR_POWER6:
  case PROCESSOR_POWER7:
    return 5;
  case PROCESSOR_POWER8:
    return 7;
  case PROCESSOR_POWER9:
    return 6;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_tune)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
      return 4;

    case PROCESSOR_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return zero if INSN can be
   chosen.  */
static int
rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
{
  if (ready_index == 0)
    return 0;

  if (rs6000_tune != PROCESSOR_CELL)
    return 0;

  gcc_assert (insn != NULL_RTX && INSN_P (insn));

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 1;

  return 0;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem_ref (XEXP (pat, i), mem_ref))
            return true;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
          {
            if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
              return true;
          }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
          return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (CALL_P (insn))
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
          return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Return whether TYPE is a Power9 pairable vector instruction type.  */

static bool
is_power9_pairable_vec_type (enum attr_type type)
{
  switch (type)
    {
    case TYPE_VECSIMPLE:
    case TYPE_VECCOMPLEX:
    case TYPE_VECDIV:
    case TYPE_VECCMP:
    case TYPE_VECPERM:
    case TYPE_VECFLOAT:
    case TYPE_VECFDIV:
    case TYPE_VECDOUBLE:
      return true;
    default:
      break;
    }
  return false;
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
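
/* For instance, when rs6000_sched_costly_dep is store_to_load_dep_costly,
   any store followed by a dependent load is kept out of the same dispatch
   group; with a numeric setting N, the dependence is costly only when the
   remaining latency (COST - DISTANCE) is at least N.  */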
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx_insn *
get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
        return NULL;

      if (CALL_P (insn)
          || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
          || (NONJUMP_INSN_P (insn)
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && INSN_CODE (insn) != CODE_FOR_stack_tie))
        break;
    }
  return insn;
}
/* Do Power9 specific sched_reorder2 reordering of ready list.  */

static int
power9_sched_reorder2 (rtx_insn **ready, int lastpos)
{
  int pos;
  int i;
  rtx_insn *tmp;
  enum attr_type type, type2;

  type = get_attr_type (last_scheduled_insn);

  /* Try to issue fixed point divides back-to-back in pairs so they will be
     routed to separate execution units and execute in parallel.  */
  if (type == TYPE_DIV && divide_cnt == 0)
    {
      /* First divide has been scheduled.  */
      divide_cnt = 1;

      /* Scan the ready list looking for another divide, if found move it
         to the end of the list so it is chosen next.  */
      pos = lastpos;
      while (pos >= 0)
        {
          if (recog_memoized (ready[pos]) >= 0
              && get_attr_type (ready[pos]) == TYPE_DIV)
            {
              tmp = ready[pos];
              for (i = pos; i < lastpos; i++)
                ready[i] = ready[i + 1];
              ready[lastpos] = tmp;
              break;
            }
          pos--;
        }
    }
  else
    {
      /* Last insn was the 2nd divide or not a divide, reset the counter.  */
      divide_cnt = 0;

      /* The best dispatch throughput for vector and vector load insns can be
         achieved by interleaving a vector and vector load such that they'll
         dispatch to the same superslice.  If this pairing cannot be achieved
         then it is best to pair vector insns together and vector load insns
         together.

         To aid in this pairing, vec_pairing maintains the current state with
         the following values:

             0 : Initial state, no vecload/vector pairing has been started.

             1 : A vecload or vector insn has been issued and a candidate for
                 pairing has been found and moved to the end of the ready
                 list.  */
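
      /* For example, after a vecload issues while vec_pairing == 0, the
         code below prefers to move a pairable vector op (a vecperm, say)
         to the end of the ready list so it is scheduled next; failing
         that, it settles for another vecload.  Either way vec_pairing
         becomes 1, and a subsequent call resets it to 0.  */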
      if (type == TYPE_VECLOAD)
        {
          /* Issued a vecload.  */
          if (vec_pairing == 0)
            {
              int vecload_pos = -1;
              /* We issued a single vecload, look for a vector insn to pair it
                 with.  If one isn't found, try to pair another vecload.  */
              pos = lastpos;
              while (pos >= 0)
                {
                  if (recog_memoized (ready[pos]) >= 0)
                    {
                      type2 = get_attr_type (ready[pos]);
                      if (is_power9_pairable_vec_type (type2))
                        {
                          /* Found a vector insn to pair with, move it to the
                             end of the ready list so it is scheduled next.  */
                          tmp = ready[pos];
                          for (i = pos; i < lastpos; i++)
                            ready[i] = ready[i + 1];
                          ready[lastpos] = tmp;
                          vec_pairing = 1;
                          return cached_can_issue_more;
                        }
                      else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
                        /* Remember position of first vecload seen.  */
                        vecload_pos = pos;
                    }
                  pos--;
                }
              if (vecload_pos >= 0)
                {
                  /* Didn't find a vector to pair with but did find a vecload,
                     move it to the end of the ready list.  */
                  tmp = ready[vecload_pos];
                  for (i = vecload_pos; i < lastpos; i++)
                    ready[i] = ready[i + 1];
                  ready[lastpos] = tmp;
                  vec_pairing = 1;
                  return cached_can_issue_more;
                }
            }
        }
      else if (is_power9_pairable_vec_type (type))
        {
          /* Issued a vector operation.  */
          if (vec_pairing == 0)
            {
              int vec_pos = -1;
              /* We issued a single vector insn, look for a vecload to pair it
                 with.  If one isn't found, try to pair another vector.  */
              pos = lastpos;
              while (pos >= 0)
                {
                  if (recog_memoized (ready[pos]) >= 0)
                    {
                      type2 = get_attr_type (ready[pos]);
                      if (type2 == TYPE_VECLOAD)
                        {
                          /* Found a vecload insn to pair with, move it to the
                             end of the ready list so it is scheduled next.  */
                          tmp = ready[pos];
                          for (i = pos; i < lastpos; i++)
                            ready[i] = ready[i + 1];
                          ready[lastpos] = tmp;
                          vec_pairing = 1;
                          return cached_can_issue_more;
                        }
                      else if (is_power9_pairable_vec_type (type2)
                               && vec_pos == -1)
                        /* Remember position of first vector insn seen.  */
                        vec_pos = pos;
                    }
                  pos--;
                }
              if (vec_pos >= 0)
                {
                  /* Didn't find a vecload to pair with but did find a vector
                     insn, move it to the end of the ready list.  */
                  tmp = ready[vec_pos];
                  for (i = vec_pos; i < lastpos; i++)
                    ready[i] = ready[i + 1];
                  ready[lastpos] = tmp;
                  vec_pairing = 1;
                  return cached_can_issue_more;
                }
            }
        }

      /* We've either finished a vec/vecload pair, couldn't find an insn to
         continue the current pair, or the last insn had nothing to do with
         pairing.  In any case, reset the state.  */
      vec_pairing = 0;
    }

  return cached_can_issue_more;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
                      rtx_insn **ready ATTRIBUTE_UNUSED,
                      int *pn_ready ATTRIBUTE_UNUSED,
                      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a nonpipelined insn.  */
  if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
          && (recog_memoized (ready[n_ready - 2]) > 0))
        /* Simply swap first two insns.  */
        std::swap (ready[n_ready - 1], ready[n_ready - 2]);
    }

  if (rs6000_tune == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
                       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
           load/store instructions which make use of the LSU and which
           would need to be accounted for to strictly model the behavior
           of the machine.  Those instructions are currently unaccounted
           for to help minimize compile time overhead of this code.  */
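
  /* As a worked example: the pendulum starts each cycle at 0.  Issuing one
     store moves it to -1, so the code below hunts for a second (preferably
     adjacent) store to issue with it; a second store moves it to -2, at
     which point the first load on the ready list gets a priority bump so
     that it is likely chosen first in the next cycle.  */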
  if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx_insn *tmp;
      rtx load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
        /* Issuing a store, swing the load_store_pendulum to the left.  */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
        /* Issuing a load, swing the load_store_pendulum to the right.  */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;

      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i = pos; i < *pn_ready - 1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready - 1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;

                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an adjacent
             memory location.  */
          int first_store_pos = -1;

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem))
                {
                  rtx str_mem2;
                  /* Maintain the index of the first store found on the
                     list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn, &str_mem2)
                      && adjacent_mem_locations (str_mem, str_mem2))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i = pos; i < *pn_ready - 1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready - 1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;

                      break;
                    }
                }
              pos--;
            }

          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store was,
                 so move the non-adjacent store to the front of the ready
                 list, and adjust its priority so that it is more likely to
                 stay there.  */
              tmp = ready[first_store_pos];
              for (i = first_store_pos; i < *pn_ready - 1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready - 1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the priority
             of the first store in the ready list to favor it for issuing in
             the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;

                  break;
                }
              pos--;
            }
        }
    }

  /* Do Power9 dependent reordering if necessary.  */
  if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
      && recog_memoized (last_scheduled_insn) >= 0)
    return power9_sched_reorder2 (ready, *pn_ready - 1);

  return cached_can_issue_more;
}
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e., the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e., the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || NOTE_P (insn)
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_tune)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
      /* FALLTHRU */
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_CR_LOGICAL:
        case TYPE_MTJMPR:
        case TYPE_MFJMPR:
        case TYPE_DIV:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_ISYNC:
        case TYPE_SYNC:
          return true;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_EXTS:
        case TYPE_CNTLZ:
        case TYPE_TRAP:
        case TYPE_MUL:
        case TYPE_INSERT:
        case TYPE_FPCOMPARE:
        case TYPE_MFCR:
        case TYPE_MTCR:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
          return true;
        case TYPE_SHIFT:
          if (get_attr_dot (insn) == DOT_NO
              || get_attr_var_shift (insn) == VAR_SHIFT_NO)
            return true;
          else
            break;
        case TYPE_DIV:
          if (get_attr_size (insn) == SIZE_32)
            return true;
          else
            break;
        case TYPE_LOAD:
        case TYPE_STORE:
        case TYPE_FPLOAD:
        case TYPE_FPSTORE:
          if (get_attr_update (insn) == UPDATE_YES)
            return true;
          else
            break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_DIV:
        case TYPE_ISYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
          return true;
        case TYPE_MUL:
        case TYPE_SHIFT:
        case TYPE_EXTS:
          if (get_attr_dot (insn) == DOT_YES)
            return true;
          else
            break;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;
          else
            break;
        case TYPE_STORE:
        case TYPE_FPLOAD:
        case TYPE_FPSTORE:
          if (get_attr_update (insn) == UPDATE_YES)
            return true;
          else
            break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_SYNC:
        case TYPE_ISYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_VECSTORE:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
          return true;
        case TYPE_SHIFT:
        case TYPE_EXTS:
        case TYPE_MUL:
          if (get_attr_dot (insn) == DOT_YES)
            return true;
          else
            break;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;
          else
            break;
        case TYPE_STORE:
          if (get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
            return true;
          else
            break;
        default:
          break;
        }
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || NOTE_P (insn)
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_tune)
    {
    case PROCESSOR_POWER4:
    case PROCESSOR_POWER5:
      if (is_microcoded_insn (insn))
        return true;

      if (is_branch_slot_insn (insn))
        return true;

      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_EXTS:
        case TYPE_CNTLZ:
        case TYPE_TRAP:
        case TYPE_MUL:
        case TYPE_FPCOMPARE:
        case TYPE_MFCR:
        case TYPE_MTCR:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
          return true;
        case TYPE_SHIFT:
          if (get_attr_dot (insn) == DOT_NO
              || get_attr_var_shift (insn) == VAR_SHIFT_NO)
            return true;
          else
            break;
        case TYPE_DIV:
          if (get_attr_size (insn) == SIZE_32)
            return true;
          else
            break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
          return true;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              && get_attr_update (insn) == UPDATE_YES)
            return true;
          else
            break;
        case TYPE_STORE:
          if (get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
            return true;
          else
            break;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_MFCR:
        case TYPE_MTCR:
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
          return true;
        case TYPE_LOAD:
          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              && get_attr_update (insn) == UPDATE_YES)
            return true;
          else
            break;
        case TYPE_STORE:
          if (get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
            return true;
          else
            break;
        default:
          break;
        }
      break;
    default:
      break;
    }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert-sched-nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in
   the last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx_insn *next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Do we have a special group ending nop?  */
      if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
          || rs6000_tune == PROCESSOR_POWER8)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                {
                  group_insns[i] = 0;
                }
            }

          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate
               && insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);

      return can_issue_more;
    }

  return can_issue_more;
}
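
/* E.g. with -minsert-sched-nops=2 exactly two nops are emitted before a
   costly NEXT_INSN (scheme 2 above), while the regroup_exact setting keeps
   emitting nops until NEXT_INSN is forced into a new dispatch group
   (scheme 1 above).  */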
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops' this
   function can force certain insns into separate groups or force a certain
   distance between them by inserting nops, for example, if there exists a
   "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form as
   follows: It keeps track of how many vacant issue slots are available after
   each insn.  A subsequent insn will start a new group if one of the
   following 4 cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or fewer issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - fewer than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
                 rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate
               && insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
            rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  load_store_pendulum = 0;
  divide_cnt = 0;
  vec_pairing = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops to enforce insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL;
      sc->load_store_pendulum = 0;
      sc->divide_cnt = 0;
      sc->vec_pairing = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
      sc->divide_cnt = divide_cnt;
      sc->vec_pairing = vec_pairing;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
  divide_cnt = sc->divide_cnt;
  vec_pairing = sc->vec_pairing;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
32651 static bool
32652 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32654 switch (get_attr_type (insn))
32656 case TYPE_DIV:
32657 case TYPE_SDIV:
32658 case TYPE_DDIV:
32659 case TYPE_VECDIV:
32660 case TYPE_SSQRT:
32661 case TYPE_DSQRT:
32662 return false;
32664 default:
32665 return true;
32669 /* Length in units of the trampoline for entering a nested function. */
32671 int
32672 rs6000_trampoline_size (void)
32674 int ret = 0;
32676 switch (DEFAULT_ABI)
32678 default:
32679 gcc_unreachable ();
32681 case ABI_AIX:
32682 ret = (TARGET_32BIT) ? 12 : 24;
32683 break;
32685 case ABI_ELFv2:
32686 gcc_assert (!TARGET_32BIT);
32687 ret = 32;
32688 break;
32690 case ABI_DARWIN:
32691 case ABI_V4:
32692 ret = (TARGET_32BIT) ? 40 : 48;
32693 break;
32696 return ret;
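/* For reference, the sizes returned above are, in bytes:

       ABI          32-bit   64-bit
       ABI_AIX        12       24
       ABI_ELFv2       -       32
       ABI_DARWIN     40       48
       ABI_V4         40       48

   ABI_ELFv2 is 64-bit only, as the assert above enforces.  */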
32699 /* Emit RTL insns to initialize the variable parts of a trampoline.
32700 FNADDR is an RTX for the address of the function's pure code.
32701 CXT is an RTX for the static chain value for the function. */
32703 static void
32704 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32706 int regsize = (TARGET_32BIT) ? 4 : 8;
32707 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32708 rtx ctx_reg = force_reg (Pmode, cxt);
32709 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32711 switch (DEFAULT_ABI)
32713 default:
32714 gcc_unreachable ();
32716 /* Under AIX, just build the 3 word function descriptor */
32717 case ABI_AIX:
32719 rtx fnmem, fn_reg, toc_reg;
32721 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32722 error ("you cannot take the address of a nested function if you use "
32723 "the %qs option", "-mno-pointers-to-nested-functions");
32725 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32726 fn_reg = gen_reg_rtx (Pmode);
32727 toc_reg = gen_reg_rtx (Pmode);
32729 /* Macro to shorten the code expansions below. */
32730 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32732 m_tramp = replace_equiv_address (m_tramp, addr);
32734 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32735 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32736 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32737 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32738 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32740 # undef MEM_PLUS
32742 break;
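/* The moves above fill in the three-word (or three-doubleword)
   descriptor at the trampoline address.  Roughly:

       tramp + 0*regsize : entry address   (copied from the target
                                            function's own descriptor)
       tramp + 1*regsize : TOC pointer     (likewise copied)
       tramp + 2*regsize : static chain    (CXT)

   so a call through the trampoline behaves like a call to the nested
   function with the static chain loaded from the third word.  */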
32744 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32745 case ABI_ELFv2:
32746 case ABI_DARWIN:
32747 case ABI_V4:
32748 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32749 LCT_NORMAL, VOIDmode,
32750 addr, Pmode,
32751 GEN_INT (rs6000_trampoline_size ()), SImode,
32752 fnaddr, Pmode,
32753 ctx_reg, Pmode);
32754 break;
32759 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32760 identifier as an argument, so the front end shouldn't look it up. */
32762 static bool
32763 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32765 return is_attribute_p ("altivec", attr_id);
32768 /* Handle the "altivec" attribute. The attribute may have
32769 arguments as follows:
32771 __attribute__((altivec(vector__)))
32772 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32773 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32775 and may appear more than once (e.g., 'vector bool char') in a
32776 given declaration. */
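/* For example, with the usual <altivec.h> spelling, a declaration
   such as

       vector unsigned int v;

   reaches this handler roughly as

       unsigned int v __attribute__((altivec(vector__)));

   and the 'v' case below rewrites its type to
   unsigned_V4SI_type_node.  */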
32778 static tree
32779 rs6000_handle_altivec_attribute (tree *node,
32780 tree name ATTRIBUTE_UNUSED,
32781 tree args,
32782 int flags ATTRIBUTE_UNUSED,
32783 bool *no_add_attrs)
32785 tree type = *node, result = NULL_TREE;
32786 machine_mode mode;
32787 int unsigned_p;
32788 char altivec_type
32789 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32790 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32791 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32792 : '?');
32794 while (POINTER_TYPE_P (type)
32795 || TREE_CODE (type) == FUNCTION_TYPE
32796 || TREE_CODE (type) == METHOD_TYPE
32797 || TREE_CODE (type) == ARRAY_TYPE)
32798 type = TREE_TYPE (type);
32800 mode = TYPE_MODE (type);
32802 /* Check for invalid AltiVec type qualifiers. */
32803 if (type == long_double_type_node)
32804 error ("use of %<long double%> in AltiVec types is invalid");
32805 else if (type == boolean_type_node)
32806 error ("use of boolean types in AltiVec types is invalid");
32807 else if (TREE_CODE (type) == COMPLEX_TYPE)
32808 error ("use of %<complex%> in AltiVec types is invalid");
32809 else if (DECIMAL_FLOAT_MODE_P (mode))
32810 error ("use of decimal floating point types in AltiVec types is invalid");
32811 else if (!TARGET_VSX)
32813 if (type == long_unsigned_type_node || type == long_integer_type_node)
32815 if (TARGET_64BIT)
32816 error ("use of %<long%> in AltiVec types is invalid for "
32817 "64-bit code without %qs", "-mvsx");
32818 else if (rs6000_warn_altivec_long)
32819 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32820 "use %<int%>");
32822 else if (type == long_long_unsigned_type_node
32823 || type == long_long_integer_type_node)
32824 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32825 "-mvsx");
32826 else if (type == double_type_node)
32827 error ("use of %<double%> in AltiVec types is invalid without %qs",
32828 "-mvsx");
32831 switch (altivec_type)
32833 case 'v':
32834 unsigned_p = TYPE_UNSIGNED (type);
32835 switch (mode)
32837 case E_TImode:
32838 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32839 break;
32840 case E_DImode:
32841 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32842 break;
32843 case E_SImode:
32844 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32845 break;
32846 case E_HImode:
32847 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32848 break;
32849 case E_QImode:
32850 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32851 break;
32852 case E_SFmode: result = V4SF_type_node; break;
32853 case E_DFmode: result = V2DF_type_node; break;
32854 /* If the user says 'vector int bool', we may be handed the 'bool'
32855 attribute _before_ the 'vector' attribute, and so select the
32856 proper type in the 'b' case below. */
32857 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32858 case E_V2DImode: case E_V2DFmode:
32859 result = type;
32860 default: break;
32862 break;
32863 case 'b':
32864 switch (mode)
32866 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32867 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32868 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32869 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32870 default: break;
32872 break;
32873 case 'p':
32874 switch (mode)
32876 case E_V8HImode: result = pixel_V8HI_type_node;
32877 default: break;
32879 default: break;
32882 /* Propagate qualifiers attached to the element type
32883 onto the vector type. */
32884 if (result && result != type && TYPE_QUALS (type))
32885 result = build_qualified_type (result, TYPE_QUALS (type));
32887 *no_add_attrs = true; /* No need to hang on to the attribute. */
32889 if (result)
32890 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32892 return NULL_TREE;
32895 /* AltiVec defines five built-in scalar types that serve as vector
32896 elements; we must teach the compiler how to mangle them. */
32898 static const char *
32899 rs6000_mangle_type (const_tree type)
32901 type = TYPE_MAIN_VARIANT (type);
32903 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32904 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32905 return NULL;
32907 if (type == bool_char_type_node) return "U6__boolc";
32908 if (type == bool_short_type_node) return "U6__bools";
32909 if (type == pixel_type_node) return "u7__pixel";
32910 if (type == bool_int_type_node) return "U6__booli";
32911 if (type == bool_long_long_type_node) return "U6__boolx";
32913 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32914 "g" for IBM extended double, no matter whether it is long double (using
32915 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32916 if (TARGET_FLOAT128_TYPE)
32918 if (type == ieee128_float_type_node)
32919 return "U10__float128";
32921 if (TARGET_LONG_DOUBLE_128)
32923 if (type == long_double_type_node)
32924 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32926 if (type == ibm128_float_type_node)
32927 return "g";
32931 /* Mangle IBM extended float long double as `g' (__float128) on
32932 powerpc*-linux where long-double-64 previously was the default. */
32933 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32934 && TARGET_ELF
32935 && TARGET_LONG_DOUBLE_128
32936 && !TARGET_IEEEQUAD)
32937 return "g";
32939 /* For all other types, use normal C++ mangling. */
32940 return NULL;
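/* The strings above follow the Itanium C++ ABI vendor-extension
   syntax: "u7__pixel" is 'u' plus a 7-character source-name (a
   vendor extended builtin type), while "U6__boolc" is 'U' plus a
   6-character name (a vendor extended qualifier applied to the
   underlying char).  So, for example, a parameter of AltiVec type
   __pixel contributes "u7__pixel" to the mangled name.  */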
32943 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32944 struct attribute_spec.handler. */
32946 static tree
32947 rs6000_handle_longcall_attribute (tree *node, tree name,
32948 tree args ATTRIBUTE_UNUSED,
32949 int flags ATTRIBUTE_UNUSED,
32950 bool *no_add_attrs)
32952 if (TREE_CODE (*node) != FUNCTION_TYPE
32953 && TREE_CODE (*node) != FIELD_DECL
32954 && TREE_CODE (*node) != TYPE_DECL)
32956 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32957 name);
32958 *no_add_attrs = true;
32961 return NULL_TREE;
32964 /* Set longcall attributes on all functions declared when
32965 rs6000_default_long_calls is true. */
32966 static void
32967 rs6000_set_default_type_attributes (tree type)
32969 if (rs6000_default_long_calls
32970 && (TREE_CODE (type) == FUNCTION_TYPE
32971 || TREE_CODE (type) == METHOD_TYPE))
32972 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32973 NULL_TREE,
32974 TYPE_ATTRIBUTES (type));
32976 #if TARGET_MACHO
32977 darwin_set_default_type_attributes (type);
32978 #endif
32981 /* Return a reference suitable for calling a function with the
32982 longcall attribute. */
32984 rtx
32985 rs6000_longcall_ref (rtx call_ref)
32987 const char *call_name;
32988 tree node;
32990 if (GET_CODE (call_ref) != SYMBOL_REF)
32991 return call_ref;
32993 /* System V adds '.' to the internal name, so skip any leading dots. */
32994 call_name = XSTR (call_ref, 0);
32995 if (*call_name == '.')
32997 while (*call_name == '.')
32998 call_name++;
33000 node = get_identifier (call_name);
33001 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
33004 return force_reg (Pmode, call_ref);
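/* For example, a longcall to "foo" on System V arrives here as the
   SYMBOL_REF ".foo"; the code above rebuilds it as plain "foo" and
   then forces the address into a register, so the call is emitted
   as an indirect call through that register.  */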
33007 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
33008 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
33009 #endif
33011 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
33012 struct attribute_spec.handler. */
33013 static tree
33014 rs6000_handle_struct_attribute (tree *node, tree name,
33015 tree args ATTRIBUTE_UNUSED,
33016 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
33018 tree *type = NULL;
33019 if (DECL_P (*node))
33021 if (TREE_CODE (*node) == TYPE_DECL)
33022 type = &TREE_TYPE (*node);
33024 else
33025 type = node;
33027 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
33028 || TREE_CODE (*type) == UNION_TYPE)))
33030 warning (OPT_Wattributes, "%qE attribute ignored", name);
33031 *no_add_attrs = true;
33034 else if ((is_attribute_p ("ms_struct", name)
33035 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
33036 || ((is_attribute_p ("gcc_struct", name)
33037 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
33039 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
33040 name);
33041 *no_add_attrs = true;
33044 return NULL_TREE;
33047 static bool
33048 rs6000_ms_bitfield_layout_p (const_tree record_type)
33050 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
33051 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
33052 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
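/* So, for example,

       struct __attribute__((ms_struct)) S { char c; int i : 9; };

   is laid out with the Microsoft bitfield rules even when the target
   default (TARGET_USE_MS_BITFIELD_LAYOUT) is GCC layout, while an
   explicit gcc_struct attribute forces GCC layout the other way.  */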
33055 #ifdef USING_ELFOS_H
33057 /* A get_unnamed_section callback, used for switching to toc_section. */
33059 static void
33060 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33062 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33063 && TARGET_MINIMAL_TOC)
33065 if (!toc_initialized)
33067 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33068 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33069 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33070 fprintf (asm_out_file, "\t.tc ");
33071 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33072 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33073 fprintf (asm_out_file, "\n");
33075 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33076 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33077 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33078 fprintf (asm_out_file, " = .+32768\n");
33079 toc_initialized = 1;
33081 else
33082 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33084 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33086 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33087 if (!toc_initialized)
33089 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33090 toc_initialized = 1;
33093 else
33095 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33096 if (!toc_initialized)
33098 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33099 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33100 fprintf (asm_out_file, " = .+32768\n");
33101 toc_initialized = 1;
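/* A sketch of the assembly this emits the first time through with
   -mminimal-toc (MINIMAL_TOC_SECTION_ASM_OP selects .got2 on ELF):

       .section ".got2","aw"
       .LCTOC1 = .+32768

   Biasing .LCTOC1 32768 bytes past the start of the table lets
   signed 16-bit displacements from it reach a full 64K of TOC.  */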
33106 /* Implement TARGET_ASM_INIT_SECTIONS. */
33108 static void
33109 rs6000_elf_asm_init_sections (void)
33111 toc_section
33112 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33114 sdata2_section
33115 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33116 SDATA2_SECTION_ASM_OP);
33119 /* Implement TARGET_SELECT_RTX_SECTION. */
33121 static section *
33122 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33123 unsigned HOST_WIDE_INT align)
33125 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33126 return toc_section;
33127 else
33128 return default_elf_select_rtx_section (mode, x, align);
33131 /* For a SYMBOL_REF, set generic flags and then perform some
33132 target-specific processing.
33134 When the AIX ABI is requested on a non-AIX system, replace the
33135 function name with the real name (with a leading .) rather than the
33136 function descriptor name. This saves a lot of overriding code to
33137 read the prefixes. */
33139 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33140 static void
33141 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33143 default_encode_section_info (decl, rtl, first);
33145 if (first
33146 && TREE_CODE (decl) == FUNCTION_DECL
33147 && !TARGET_AIX
33148 && DEFAULT_ABI == ABI_AIX)
33150 rtx sym_ref = XEXP (rtl, 0);
33151 size_t len = strlen (XSTR (sym_ref, 0));
33152 char *str = XALLOCAVEC (char, len + 2);
33153 str[0] = '.';
33154 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33155 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33159 static inline bool
33160 compare_section_name (const char *section, const char *templ)
33162 int len;
33164 len = strlen (templ);
33165 return (strncmp (section, templ, len) == 0
33166 && (section[len] == 0 || section[len] == '.'));
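/* So compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") both hold, while
   compare_section_name (".sdata2", ".sdata") does not --
   ".sdata2" only matches its own template below.  */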
33169 bool
33170 rs6000_elf_in_small_data_p (const_tree decl)
33172 if (rs6000_sdata == SDATA_NONE)
33173 return false;
33175 /* We want to merge strings, so we never consider them small data. */
33176 if (TREE_CODE (decl) == STRING_CST)
33177 return false;
33179 /* Functions are never in the small data area. */
33180 if (TREE_CODE (decl) == FUNCTION_DECL)
33181 return false;
33183 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33185 const char *section = DECL_SECTION_NAME (decl);
33186 if (compare_section_name (section, ".sdata")
33187 || compare_section_name (section, ".sdata2")
33188 || compare_section_name (section, ".gnu.linkonce.s")
33189 || compare_section_name (section, ".sbss")
33190 || compare_section_name (section, ".sbss2")
33191 || compare_section_name (section, ".gnu.linkonce.sb")
33192 || strcmp (section, ".PPC.EMB.sdata0") == 0
33193 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33194 return true;
33196 else
33198 /* If we are told not to put readonly data in sdata, then don't. */
33199 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33200 && !rs6000_readonly_in_sdata)
33201 return false;
33203 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33205 if (size > 0
33206 && size <= g_switch_value
33207 /* If it's not public, and we're not going to reference it there,
33208 there's no need to put it in the small data section. */
33209 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33210 return true;
33213 return false;
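/* For example, with -G 8 (so g_switch_value is 8) and -msdata=data,
   a public file-scope definition such as

       int counter;     4 bytes, within the -G limit

   is placed in the small data area, while a 16-byte array, or a
   non-public variable under -msdata=data, is not.  */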
33216 #endif /* USING_ELFOS_H */
33218 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33220 static bool
33221 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33223 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33226 /* Do not place thread-local symbols refs in the object blocks. */
33228 static bool
33229 rs6000_use_blocks_for_decl_p (const_tree decl)
33231 return !DECL_THREAD_LOCAL_P (decl);
33234 /* Return a REG that occurs in ADDR with coefficient 1.
33235 ADDR can be effectively incremented by incrementing REG.
33237 r0 is special and we must not select it as an address
33238 register by this routine since our caller will try to
33239 increment the returned register via an "la" instruction. */
33241 rtx
33242 find_addr_reg (rtx addr)
33244 while (GET_CODE (addr) == PLUS)
33246 if (GET_CODE (XEXP (addr, 0)) == REG
33247 && REGNO (XEXP (addr, 0)) != 0)
33248 addr = XEXP (addr, 0);
33249 else if (GET_CODE (XEXP (addr, 1)) == REG
33250 && REGNO (XEXP (addr, 1)) != 0)
33251 addr = XEXP (addr, 1);
33252 else if (CONSTANT_P (XEXP (addr, 0)))
33253 addr = XEXP (addr, 1);
33254 else if (CONSTANT_P (XEXP (addr, 1)))
33255 addr = XEXP (addr, 0);
33256 else
33257 gcc_unreachable ();
33259 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33260 return addr;
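/* For example, given (plus (reg 9) (const_int 16)) this returns
   reg 9; given (plus (reg 0) (reg 9)) it also returns reg 9, since
   r0 reads as literal zero in an "la"/"addi" base position and must
   be skipped.  */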
33263 void
33264 rs6000_fatal_bad_address (rtx op)
33266 fatal_insn ("bad address", op);
33269 #if TARGET_MACHO
33271 typedef struct branch_island_d {
33272 tree function_name;
33273 tree label_name;
33274 int line_number;
33275 } branch_island;
33278 static vec<branch_island, va_gc> *branch_islands;
33280 /* Remember to generate a branch island for far calls to the given
33281 function. */
33283 static void
33284 add_compiler_branch_island (tree label_name, tree function_name,
33285 int line_number)
33287 branch_island bi = {function_name, label_name, line_number};
33288 vec_safe_push (branch_islands, bi);
33291 /* Generate far-jump branch islands for everything recorded in
33292 branch_islands. Invoked immediately after the last instruction of
33293 the epilogue has been emitted; the branch islands must be appended
33294 to, and contiguous with, the function body. Mach-O stubs are
33295 generated in machopic_output_stub(). */
33297 static void
33298 macho_branch_islands (void)
33300 char tmp_buf[512];
33302 while (!vec_safe_is_empty (branch_islands))
33304 branch_island *bi = &branch_islands->last ();
33305 const char *label = IDENTIFIER_POINTER (bi->label_name);
33306 const char *name = IDENTIFIER_POINTER (bi->function_name);
33307 char name_buf[512];
33308 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33309 if (name[0] == '*' || name[0] == '&')
33310 strcpy (name_buf, name+1);
33311 else
33313 name_buf[0] = '_';
33314 strcpy (name_buf+1, name);
33316 strcpy (tmp_buf, "\n");
33317 strcat (tmp_buf, label);
33318 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33319 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33320 dbxout_stabd (N_SLINE, bi->line_number);
33321 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33322 if (flag_pic)
33324 if (TARGET_LINK_STACK)
33326 char name[32];
33327 get_ppc476_thunk_name (name);
33328 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33329 strcat (tmp_buf, name);
33330 strcat (tmp_buf, "\n");
33331 strcat (tmp_buf, label);
33332 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33334 else
33336 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33337 strcat (tmp_buf, label);
33338 strcat (tmp_buf, "_pic\n");
33339 strcat (tmp_buf, label);
33340 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33343 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33344 strcat (tmp_buf, name_buf);
33345 strcat (tmp_buf, " - ");
33346 strcat (tmp_buf, label);
33347 strcat (tmp_buf, "_pic)\n");
33349 strcat (tmp_buf, "\tmtlr r0\n");
33351 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33352 strcat (tmp_buf, name_buf);
33353 strcat (tmp_buf, " - ");
33354 strcat (tmp_buf, label);
33355 strcat (tmp_buf, "_pic)\n");
33357 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33359 else
33361 strcat (tmp_buf, ":\nlis r12,hi16(");
33362 strcat (tmp_buf, name_buf);
33363 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33364 strcat (tmp_buf, name_buf);
33365 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33367 output_asm_insn (tmp_buf, 0);
33368 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33369 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33370 dbxout_stabd (N_SLINE, bi->line_number);
33371 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33372 branch_islands->pop ();
33376 /* NO_PREVIOUS_DEF checks whether the function name is already
33377 in the branch island list. */
33379 static int
33380 no_previous_def (tree function_name)
33382 branch_island *bi;
33383 unsigned ix;
33385 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33386 if (function_name == bi->function_name)
33387 return 0;
33388 return 1;
33391 /* GET_PREV_LABEL gets the label name from the previous definition of
33392 the function. */
33394 static tree
33395 get_prev_label (tree function_name)
33397 branch_island *bi;
33398 unsigned ix;
33400 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33401 if (function_name == bi->function_name)
33402 return bi->label_name;
33403 return NULL_TREE;
33406 /* INSN is either a function call or a millicode call. It may have an
33407 unconditional jump in its delay slot.
33409 CALL_DEST is the routine we are calling. */
33411 char *
33412 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33413 int cookie_operand_number)
33415 static char buf[256];
33416 if (darwin_emit_branch_islands
33417 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33418 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33420 tree labelname;
33421 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33423 if (no_previous_def (funname))
33425 rtx label_rtx = gen_label_rtx ();
33426 char *label_buf, temp_buf[256];
33427 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33428 CODE_LABEL_NUMBER (label_rtx));
33429 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33430 labelname = get_identifier (label_buf);
33431 add_compiler_branch_island (labelname, funname, insn_line (insn));
33433 else
33434 labelname = get_prev_label (funname);
33436 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33437 instruction will reach 'foo', otherwise link as 'bl L42'".
33438 "L42" should be a 'branch island', that will do a far jump to
33439 'foo'. Branch islands are generated in
33440 macho_branch_islands(). */
33441 sprintf (buf, "jbsr %%z%d,%.246s",
33442 dest_operand_number, IDENTIFIER_POINTER (labelname));
33444 else
33445 sprintf (buf, "bl %%z%d", dest_operand_number);
33446 return buf;
33449 /* Generate PIC and indirect symbol stubs. */
33451 void
33452 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33454 unsigned int length;
33455 char *symbol_name, *lazy_ptr_name;
33456 char *local_label_0;
33457 static int label = 0;
33459 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33460 symb = (*targetm.strip_name_encoding) (symb);
33463 length = strlen (symb);
33464 symbol_name = XALLOCAVEC (char, length + 32);
33465 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33467 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33468 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33470 if (flag_pic == 2)
33471 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33472 else
33473 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33475 if (flag_pic == 2)
33477 fprintf (file, "\t.align 5\n");
33479 fprintf (file, "%s:\n", stub);
33480 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33482 label++;
33483 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33484 sprintf (local_label_0, "\"L%011d$spb\"", label);
33486 fprintf (file, "\tmflr r0\n");
33487 if (TARGET_LINK_STACK)
33489 char name[32];
33490 get_ppc476_thunk_name (name);
33491 fprintf (file, "\tbl %s\n", name);
33492 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33494 else
33496 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33497 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33499 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33500 lazy_ptr_name, local_label_0);
33501 fprintf (file, "\tmtlr r0\n");
33502 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33503 (TARGET_64BIT ? "ldu" : "lwzu"),
33504 lazy_ptr_name, local_label_0);
33505 fprintf (file, "\tmtctr r12\n");
33506 fprintf (file, "\tbctr\n");
33508 else
33510 fprintf (file, "\t.align 4\n");
33512 fprintf (file, "%s:\n", stub);
33513 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33515 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33516 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33517 (TARGET_64BIT ? "ldu" : "lwzu"),
33518 lazy_ptr_name);
33519 fprintf (file, "\tmtctr r12\n");
33520 fprintf (file, "\tbctr\n");
33523 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33524 fprintf (file, "%s:\n", lazy_ptr_name);
33525 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33526 fprintf (file, "%sdyld_stub_binding_helper\n",
33527 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33530 /* Legitimize PIC addresses. If the address is already
33531 position-independent, we return ORIG. Newly generated
33532 position-independent addresses go into a reg. This is REG if
33533 nonzero; otherwise we allocate register(s) as necessary. */
33535 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
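/* SMALL_INT tests for the signed 16-bit range [-0x8000, 0x7fff]
   with a single unsigned compare: e.g. for X = -1 the biased sum is
   0x7fff, which passes, while X = 0x8000 gives exactly 0x10000 and
   fails.  */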
33537 rtx
33538 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33539 rtx reg)
33541 rtx base, offset;
33543 if (reg == NULL && !reload_completed)
33544 reg = gen_reg_rtx (Pmode);
33546 if (GET_CODE (orig) == CONST)
33548 rtx reg_temp;
33550 if (GET_CODE (XEXP (orig, 0)) == PLUS
33551 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33552 return orig;
33554 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33556 /* Use a different reg for the intermediate value, as
33557 it will be marked UNCHANGING. */
33558 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33559 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33560 Pmode, reg_temp);
33561 offset =
33562 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33563 Pmode, reg);
33565 if (GET_CODE (offset) == CONST_INT)
33567 if (SMALL_INT (offset))
33568 return plus_constant (Pmode, base, INTVAL (offset));
33569 else if (!reload_completed)
33570 offset = force_reg (Pmode, offset);
33571 else
33573 rtx mem = force_const_mem (Pmode, orig);
33574 return machopic_legitimize_pic_address (mem, Pmode, reg);
33577 return gen_rtx_PLUS (Pmode, base, offset);
33580 /* Fall back on generic machopic code. */
33581 return machopic_legitimize_pic_address (orig, mode, reg);
33584 /* Output a .machine directive for the Darwin assembler, and call
33585 the generic start_file routine. */
33587 static void
33588 rs6000_darwin_file_start (void)
33590 static const struct
33592 const char *arg;
33593 const char *name;
33594 HOST_WIDE_INT if_set;
33595 } mapping[] = {
33596 { "ppc64", "ppc64", MASK_64BIT },
33597 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33598 { "power4", "ppc970", 0 },
33599 { "G5", "ppc970", 0 },
33600 { "7450", "ppc7450", 0 },
33601 { "7400", "ppc7400", MASK_ALTIVEC },
33602 { "G4", "ppc7400", 0 },
33603 { "750", "ppc750", 0 },
33604 { "740", "ppc750", 0 },
33605 { "G3", "ppc750", 0 },
33606 { "604e", "ppc604e", 0 },
33607 { "604", "ppc604", 0 },
33608 { "603e", "ppc603", 0 },
33609 { "603", "ppc603", 0 },
33610 { "601", "ppc601", 0 },
33611 { NULL, "ppc", 0 } };
33612 const char *cpu_id = "";
33613 size_t i;
33615 rs6000_file_start ();
33616 darwin_file_start ();
33618 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33620 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33621 cpu_id = rs6000_default_cpu;
33623 if (global_options_set.x_rs6000_cpu_index)
33624 cpu_id = processor_target_table[rs6000_cpu_index].name;
33626 /* Look through the mapping array. Pick the first name that either
33627 matches the argument, has a bit set in IF_SET that is also set
33628 in the target flags, or has a NULL name. */
33630 i = 0;
33631 while (mapping[i].arg != NULL
33632 && strcmp (mapping[i].arg, cpu_id) != 0
33633 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33634 i++;
33636 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33639 #endif /* TARGET_MACHO */
33641 #if TARGET_ELF
33642 static int
33643 rs6000_elf_reloc_rw_mask (void)
33645 if (flag_pic)
33646 return 3;
33647 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33648 return 2;
33649 else
33650 return 0;
33653 /* Record an element in the table of global constructors. SYMBOL is
33654 a SYMBOL_REF of the function to be called; PRIORITY is a number
33655 between 0 and MAX_INIT_PRIORITY.
33657 This differs from default_named_section_asm_out_constructor in
33658 that we have special handling for -mrelocatable. */
33660 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33661 static void
33662 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33664 const char *section = ".ctors";
33665 char buf[18];
33667 if (priority != DEFAULT_INIT_PRIORITY)
33669 sprintf (buf, ".ctors.%.5u",
33670 /* Invert the numbering so the linker puts us in the proper
33671 order; constructors are run from right to left, and the
33672 linker sorts in increasing order. */
33673 MAX_INIT_PRIORITY - priority);
33674 section = buf;
33677 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33678 assemble_align (POINTER_SIZE);
33680 if (DEFAULT_ABI == ABI_V4
33681 && (TARGET_RELOCATABLE || flag_pic > 1))
33683 fputs ("\t.long (", asm_out_file);
33684 output_addr_const (asm_out_file, symbol);
33685 fputs (")@fixup\n", asm_out_file);
33687 else
33688 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
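/* For example, a constructor with priority 101 is placed in section
   ".ctors.65434" (65535 - 101), so after the linker's increasing
   sort it sits near the end of .ctors and therefore runs early in
   the right-to-left execution order.  */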
33691 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33692 static void
33693 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33695 const char *section = ".dtors";
33696 char buf[18];
33698 if (priority != DEFAULT_INIT_PRIORITY)
33700 sprintf (buf, ".dtors.%.5u",
33701 /* Invert the numbering so the linker puts us in the proper
33702 order; constructors are run from right to left, and the
33703 linker sorts in increasing order. */
33704 MAX_INIT_PRIORITY - priority);
33705 section = buf;
33708 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33709 assemble_align (POINTER_SIZE);
33711 if (DEFAULT_ABI == ABI_V4
33712 && (TARGET_RELOCATABLE || flag_pic > 1))
33714 fputs ("\t.long (", asm_out_file);
33715 output_addr_const (asm_out_file, symbol);
33716 fputs (")@fixup\n", asm_out_file);
33718 else
33719 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33722 void
33723 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33725 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33727 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33728 ASM_OUTPUT_LABEL (file, name);
33729 fputs (DOUBLE_INT_ASM_OP, file);
33730 rs6000_output_function_entry (file, name);
33731 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33732 if (DOT_SYMBOLS)
33734 fputs ("\t.size\t", file);
33735 assemble_name (file, name);
33736 fputs (",24\n\t.type\t.", file);
33737 assemble_name (file, name);
33738 fputs (",@function\n", file);
33739 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33741 fputs ("\t.globl\t.", file);
33742 assemble_name (file, name);
33743 putc ('\n', file);
33746 else
33747 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33748 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33749 rs6000_output_function_entry (file, name);
33750 fputs (":\n", file);
33751 return;
33754 int uses_toc;
33755 if (DEFAULT_ABI == ABI_V4
33756 && (TARGET_RELOCATABLE || flag_pic > 1)
33757 && !TARGET_SECURE_PLT
33758 && (!constant_pool_empty_p () || crtl->profile)
33759 && (uses_toc = uses_TOC ()))
33761 char buf[256];
33763 if (uses_toc == 2)
33764 switch_to_other_text_partition ();
33765 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33767 fprintf (file, "\t.long ");
33768 assemble_name (file, toc_label_name);
33769 need_toc_init = 1;
33770 putc ('-', file);
33771 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33772 assemble_name (file, buf);
33773 putc ('\n', file);
33774 if (uses_toc == 2)
33775 switch_to_other_text_partition ();
33778 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33779 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33781 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33783 char buf[256];
33785 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33787 fprintf (file, "\t.quad .TOC.-");
33788 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33789 assemble_name (file, buf);
33790 putc ('\n', file);
33793 if (DEFAULT_ABI == ABI_AIX)
33795 const char *desc_name, *orig_name;
33797 orig_name = (*targetm.strip_name_encoding) (name);
33798 desc_name = orig_name;
33799 while (*desc_name == '.')
33800 desc_name++;
33802 if (TREE_PUBLIC (decl))
33803 fprintf (file, "\t.globl %s\n", desc_name);
33805 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33806 fprintf (file, "%s:\n", desc_name);
33807 fprintf (file, "\t.long %s\n", orig_name);
33808 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33809 fputs ("\t.long 0\n", file);
33810 fprintf (file, "\t.previous\n");
33812 ASM_OUTPUT_LABEL (file, name);
33815 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33816 static void
33817 rs6000_elf_file_end (void)
33819 #ifdef HAVE_AS_GNU_ATTRIBUTE
33820 /* ??? The value emitted depends on options active at file end.
33821 Assume anyone using #pragma or attributes that might change
33822 options knows what they are doing. */
33823 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33824 && rs6000_passes_float)
33826 int fp;
33828 if (TARGET_DF_FPR)
33829 fp = 1;
33830 else if (TARGET_SF_FPR)
33831 fp = 3;
33832 else
33833 fp = 2;
33834 if (rs6000_passes_long_double)
33836 if (!TARGET_LONG_DOUBLE_128)
33837 fp |= 2 * 4;
33838 else if (TARGET_IEEEQUAD)
33839 fp |= 3 * 4;
33840 else
33841 fp |= 1 * 4;
33843 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
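/* For example, a hard-float compile using double-precision FPRs
   (fp = 1) that also passes 128-bit IBM long double adds 1 * 4,
   emitting ".gnu_attribute 4, 5".  */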
33845 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33847 if (rs6000_passes_vector)
33848 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33849 (TARGET_ALTIVEC_ABI ? 2 : 1));
33850 if (rs6000_returns_struct)
33851 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33852 aix_struct_return ? 2 : 1);
33854 #endif
33855 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33856 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33857 file_end_indicate_exec_stack ();
33858 #endif
33860 if (flag_split_stack)
33861 file_end_indicate_split_stack ();
33863 if (cpu_builtin_p)
33865 /* We have expanded a CPU builtin, so we need to emit a reference to
33866 the special symbol that libc uses to declare that it supports the
33867 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
33868 switch_to_section (data_section);
33869 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33870 fprintf (asm_out_file, "\t%s %s\n",
33871 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33874 #endif
33876 #if TARGET_XCOFF
33878 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33879 #define HAVE_XCOFF_DWARF_EXTRAS 0
33880 #endif
33882 static enum unwind_info_type
33883 rs6000_xcoff_debug_unwind_info (void)
33885 return UI_NONE;
33888 static void
33889 rs6000_xcoff_asm_output_anchor (rtx symbol)
33891 char buffer[100];
33893 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33894 SYMBOL_REF_BLOCK_OFFSET (symbol));
33895 fprintf (asm_out_file, "%s", SET_ASM_OP);
33896 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33897 fprintf (asm_out_file, ",");
33898 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33899 fprintf (asm_out_file, "\n");
33902 static void
33903 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33905 fputs (GLOBAL_ASM_OP, stream);
33906 RS6000_OUTPUT_BASENAME (stream, name);
33907 putc ('\n', stream);
33910 /* A get_unnamed_section callback, used for read-only sections.
33911 DIRECTIVE points to the section string variable. */
33913 static void
33914 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33916 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33917 *(const char *const *) directive,
33918 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33921 /* Likewise for read-write sections. */
33923 static void
33924 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33926 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33927 *(const char *const *) directive,
33928 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33931 static void
33932 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33934 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33935 *(const char *const *) directive,
33936 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33939 /* A get_unnamed_section callback, used for switching to toc_section. */
33941 static void
33942 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33944 if (TARGET_MINIMAL_TOC)
33946 /* toc_section is always selected at least once from
33947 rs6000_xcoff_file_start, so this is guaranteed to
33948 be defined exactly once in each file. */
33949 if (!toc_initialized)
33951 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33952 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33953 toc_initialized = 1;
33955 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33956 (TARGET_32BIT ? "" : ",3"));
33958 else
33959 fputs ("\t.toc\n", asm_out_file);
33962 /* Implement TARGET_ASM_INIT_SECTIONS. */
33964 static void
33965 rs6000_xcoff_asm_init_sections (void)
33967 read_only_data_section
33968 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33969 &xcoff_read_only_section_name);
33971 private_data_section
33972 = get_unnamed_section (SECTION_WRITE,
33973 rs6000_xcoff_output_readwrite_section_asm_op,
33974 &xcoff_private_data_section_name);
33976 tls_data_section
33977 = get_unnamed_section (SECTION_TLS,
33978 rs6000_xcoff_output_tls_section_asm_op,
33979 &xcoff_tls_data_section_name);
33981 tls_private_data_section
33982 = get_unnamed_section (SECTION_TLS,
33983 rs6000_xcoff_output_tls_section_asm_op,
33984 &xcoff_private_data_section_name);
33986 read_only_private_data_section
33987 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33988 &xcoff_private_data_section_name);
33990 toc_section
33991 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33993 readonly_data_section = read_only_data_section;
33996 static int
33997 rs6000_xcoff_reloc_rw_mask (void)
33999 return 3;
34002 static void
34003 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
34004 tree decl ATTRIBUTE_UNUSED)
34006 int smclass;
34007 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
34009 if (flags & SECTION_EXCLUDE)
34010 smclass = 4;
34011 else if (flags & SECTION_DEBUG)
34013 fprintf (asm_out_file, "\t.dwsect %s\n", name);
34014 return;
34016 else if (flags & SECTION_CODE)
34017 smclass = 0;
34018 else if (flags & SECTION_TLS)
34019 smclass = 3;
34020 else if (flags & SECTION_WRITE)
34021 smclass = 2;
34022 else
34023 smclass = 1;
34025 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
34026 (flags & SECTION_CODE) ? "." : "",
34027 name, suffix[smclass], flags & SECTION_ENTSIZE);
34030 #define IN_NAMED_SECTION(DECL) \
34031 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
34032 && DECL_SECTION_NAME (DECL) != NULL)
34034 static section *
34035 rs6000_xcoff_select_section (tree decl, int reloc,
34036 unsigned HOST_WIDE_INT align)
34038 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
34039 a named section. */
34040 if (align > BIGGEST_ALIGNMENT)
34042 resolve_unique_section (decl, reloc, true);
34043 if (IN_NAMED_SECTION (decl))
34044 return get_named_section (decl, NULL, reloc);
34047 if (decl_readonly_section (decl, reloc))
34049 if (TREE_PUBLIC (decl))
34050 return read_only_data_section;
34051 else
34052 return read_only_private_data_section;
34054 else
34056 #if HAVE_AS_TLS
34057 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34059 if (TREE_PUBLIC (decl))
34060 return tls_data_section;
34061 else if (bss_initializer_p (decl))
34063 /* Convert to COMMON to emit in BSS. */
34064 DECL_COMMON (decl) = 1;
34065 return tls_comm_section;
34067 else
34068 return tls_private_data_section;
34070 else
34071 #endif
34072 if (TREE_PUBLIC (decl))
34073 return data_section;
34074 else
34075 return private_data_section;
34079 static void
34080 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
34082 const char *name;
34084 /* Use select_section for private data and uninitialized data with
34085 alignment <= BIGGEST_ALIGNMENT. */
34086 if (!TREE_PUBLIC (decl)
34087 || DECL_COMMON (decl)
34088 || (DECL_INITIAL (decl) == NULL_TREE
34089 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
34090 || DECL_INITIAL (decl) == error_mark_node
34091 || (flag_zero_initialized_in_bss
34092 && initializer_zerop (DECL_INITIAL (decl))))
34093 return;
34095 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
34096 name = (*targetm.strip_name_encoding) (name);
34097 set_decl_section_name (decl, name);
34100 /* Select section for constant in constant pool.
34102 On RS/6000, all constants are in the private read-only data area.
34103 However, if this is being placed in the TOC it must be output as a
34104 toc entry. */
34106 static section *
34107 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34108 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34110 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34111 return toc_section;
34112 else
34113 return read_only_private_data_section;
34116 /* Remove any trailing [DS] or the like from the symbol name. */
34118 static const char *
34119 rs6000_xcoff_strip_name_encoding (const char *name)
34121 size_t len;
34122 if (*name == '*')
34123 name++;
34124 len = strlen (name);
34125 if (name[len - 1] == ']')
34126 return ggc_alloc_string (name, len - 4);
34127 else
34128 return name;
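/* For example, "*foo[DS]" first loses the '*' and then the
   four-character "[DS]" suffix, yielding "foo"; a name with no
   trailing ']' is returned unchanged.  Note that len - 4 assumes a
   two-letter mapping class such as [DS] or [RW].  */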
34131 /* Section attributes. AIX is always PIC. */
34133 static unsigned int
34134 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34136 unsigned int align;
34137 unsigned int flags = default_section_type_flags (decl, name, reloc);
34139 /* Align to at least UNIT size. */
34140 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34141 align = MIN_UNITS_PER_WORD;
34142 else
34143 /* Increase alignment of large objects if not already stricter. */
34144 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34145 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34146 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34148 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
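/* For example, on a 32-bit target a 16-byte data object with 4-byte
   DECL_ALIGN gets align = MAX (4, UNITS_PER_FP_WORD) = 8, and
   exact_log2 (8) = 3 is stored in the SECTION_ENTSIZE bits of the
   returned flags.  */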
34151 /* Output at beginning of assembler file.
34153 Initialize the section names for the RS/6000 at this point.
34155 Specify filename, including full path, to assembler.
34157 We want to go into the TOC section so at least one .toc will be emitted.
34158 Also, in order to output proper .bs/.es pairs, we need at least one static
34159 [RW] section emitted.
34161 Finally, declare mcount when profiling to make the assembler happy. */
34163 static void
34164 rs6000_xcoff_file_start (void)
34166 rs6000_gen_section_name (&xcoff_bss_section_name,
34167 main_input_filename, ".bss_");
34168 rs6000_gen_section_name (&xcoff_private_data_section_name,
34169 main_input_filename, ".rw_");
34170 rs6000_gen_section_name (&xcoff_read_only_section_name,
34171 main_input_filename, ".ro_");
34172 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34173 main_input_filename, ".tls_");
34174 rs6000_gen_section_name (&xcoff_tbss_section_name,
34175 main_input_filename, ".tbss_[UL]");
34177 fputs ("\t.file\t", asm_out_file);
34178 output_quoted_string (asm_out_file, main_input_filename);
34179 fputc ('\n', asm_out_file);
34180 if (write_symbols != NO_DEBUG)
34181 switch_to_section (private_data_section);
34182 switch_to_section (toc_section);
34183 switch_to_section (text_section);
34184 if (profile_flag)
34185 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34186 rs6000_file_start ();
34189 /* Output at end of assembler file.
34190 On the RS/6000, referencing data should automatically pull in text. */
34192 static void
34193 rs6000_xcoff_file_end (void)
34195 switch_to_section (text_section);
34196 fputs ("_section_.text:\n", asm_out_file);
34197 switch_to_section (data_section);
34198 fputs (TARGET_32BIT
34199 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34200 asm_out_file);
34203 struct declare_alias_data
34205 FILE *file;
34206 bool function_descriptor;
34209 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
34211 static bool
34212 rs6000_declare_alias (struct symtab_node *n, void *d)
34214 struct declare_alias_data *data = (struct declare_alias_data *)d;
34215 /* Main symbol is output specially, because varasm machinery does part of
34216 the job for us - we do not need to declare .globl/lglobs and such. */
34217 if (!n->alias || n->weakref)
34218 return false;
34220 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34221 return false;
34223 /* Prevent assemble_alias from trying to use .set pseudo operation
34224 that does not behave as expected by the middle-end. */
34225 TREE_ASM_WRITTEN (n->decl) = true;
34227 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34228 char *buffer = (char *) alloca (strlen (name) + 2);
34229 char *p;
34230 int dollar_inside = 0;
34232 strcpy (buffer, name);
34233 p = strchr (buffer, '$');
34234 while (p) {
34235 *p = '_';
34236 dollar_inside++;
34237 p = strchr (p + 1, '$');
34239 if (TREE_PUBLIC (n->decl))
34241 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34243 if (dollar_inside) {
34244 if (data->function_descriptor)
34245 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34246 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34248 if (data->function_descriptor)
34250 fputs ("\t.globl .", data->file);
34251 RS6000_OUTPUT_BASENAME (data->file, buffer);
34252 putc ('\n', data->file);
34254 fputs ("\t.globl ", data->file);
34255 RS6000_OUTPUT_BASENAME (data->file, buffer);
34256 putc ('\n', data->file);
34258 #ifdef ASM_WEAKEN_DECL
34259 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34260 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34261 #endif
34263 else
34265 if (dollar_inside)
34267 if (data->function_descriptor)
34268 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34269 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34271 if (data->function_descriptor)
34273 fputs ("\t.lglobl .", data->file);
34274 RS6000_OUTPUT_BASENAME (data->file, buffer);
34275 putc ('\n', data->file);
34277 fputs ("\t.lglobl ", data->file);
34278 RS6000_OUTPUT_BASENAME (data->file, buffer);
34279 putc ('\n', data->file);
34281 if (data->function_descriptor)
34282 fputs (".", data->file);
34283 RS6000_OUTPUT_BASENAME (data->file, buffer);
34284 fputs (":\n", data->file);
34285 return false;
34289 #ifdef HAVE_GAS_HIDDEN
34290 /* Helper function to calculate visibility of a DECL
34291 and return the value as a const string. */
34293 static const char *
34294 rs6000_xcoff_visibility (tree decl)
34296 static const char * const visibility_types[] = {
34297 "", ",protected", ",hidden", ",internal"
34300 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34302 if (TREE_CODE (decl) == FUNCTION_DECL
34303 && cgraph_node::get (decl)
34304 && cgraph_node::get (decl)->instrumentation_clone
34305 && cgraph_node::get (decl)->instrumented_version)
34306 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
34308 return visibility_types[vis];
34310 #endif
34313 /* This macro produces the initial definition of a function name.
34314 On the RS/6000, we need to place an extra '.' in the function name and
34315 output the function descriptor.
34316 Dollar signs are converted to underscores.
34318 The csect for the function will have already been created when
34319 text_section was selected. We do have to go back to that csect, however.
34321 The third and fourth parameters to the .function pseudo-op (16 and 044)
34322 are placeholders which no longer have any use.
34324 Because AIX assembler's .set command has unexpected semantics, we output
34325 all aliases as alternative labels in front of the definition. */
34327 void
34328 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34330 char *buffer = (char *) alloca (strlen (name) + 1);
34331 char *p;
34332 int dollar_inside = 0;
34333 struct declare_alias_data data = {file, false};
34335 strcpy (buffer, name);
34336 p = strchr (buffer, '$');
34337 while (p) {
34338 *p = '_';
34339 dollar_inside++;
34340 p = strchr (p + 1, '$');
34342 if (TREE_PUBLIC (decl))
34344 if (!RS6000_WEAK || !DECL_WEAK (decl))
34346 if (dollar_inside) {
34347 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34348 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34350 fputs ("\t.globl .", file);
34351 RS6000_OUTPUT_BASENAME (file, buffer);
34352 #ifdef HAVE_GAS_HIDDEN
34353 fputs (rs6000_xcoff_visibility (decl), file);
34354 #endif
34355 putc ('\n', file);
34358 else
34360 if (dollar_inside) {
34361 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34362 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34364 fputs ("\t.lglobl .", file);
34365 RS6000_OUTPUT_BASENAME (file, buffer);
34366 putc ('\n', file);
34368 fputs ("\t.csect ", file);
34369 RS6000_OUTPUT_BASENAME (file, buffer);
34370 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34371 RS6000_OUTPUT_BASENAME (file, buffer);
34372 fputs (":\n", file);
34373 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34374 &data, true);
34375 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34376 RS6000_OUTPUT_BASENAME (file, buffer);
34377 fputs (", TOC[tc0], 0\n", file);
34378 in_section = NULL;
34379 switch_to_section (function_section (decl));
34380 putc ('.', file);
34381 RS6000_OUTPUT_BASENAME (file, buffer);
34382 fputs (":\n", file);
34383 data.function_descriptor = true;
34384 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34385 &data, true);
34386 if (!DECL_IGNORED_P (decl))
34388 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34389 xcoffout_declare_function (file, decl, buffer);
34390 else if (write_symbols == DWARF2_DEBUG)
34392 name = (*targetm.strip_name_encoding) (name);
34393 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34396 return;
34400 /* Output assembly language to globalize a symbol from a DECL,
34401 possibly with visibility. */
34403 void
34404 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34406 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34407 fputs (GLOBAL_ASM_OP, stream);
34408 RS6000_OUTPUT_BASENAME (stream, name);
34409 #ifdef HAVE_GAS_HIDDEN
34410 fputs (rs6000_xcoff_visibility (decl), stream);
34411 #endif
34412 putc ('\n', stream);
34415 /* Output assembly language to define a symbol as COMMON from a DECL,
34416 possibly with visibility. */
34418 void
34419 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34420 tree decl ATTRIBUTE_UNUSED,
34421 const char *name,
34422 unsigned HOST_WIDE_INT size,
34423 unsigned HOST_WIDE_INT align)
34425 unsigned HOST_WIDE_INT align2 = 2;
34427 if (align > 32)
34428 align2 = floor_log2 (align / BITS_PER_UNIT);
34429 else if (size > 4)
34430 align2 = 3;
34432 fputs (COMMON_ASM_OP, stream);
34433 RS6000_OUTPUT_BASENAME (stream, name);
34435 fprintf (stream,
34436 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34437 size, align2);
34439 #ifdef HAVE_GAS_HIDDEN
34440 if (decl != NULL)
34441 fputs (rs6000_xcoff_visibility (decl), stream);
34442 #endif
34443 putc ('\n', stream);
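/* For example, size = 8 and align = 64 (bits) give
   align2 = floor_log2 (64 / BITS_PER_UNIT) = 3, so this emits
   roughly

       .comm name,8,3

   where the trailing field is the log2 alignment the AIX assembler
   expects.  */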
34446 /* This macro produces the initial definition of an object (variable) name.
34447 Because AIX assembler's .set command has unexpected semantics, we output
34448 all aliases as alternative labels in front of the definition. */
34450 void
34451 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34453 struct declare_alias_data data = {file, false};
34454 RS6000_OUTPUT_BASENAME (file, name);
34455 fputs (":\n", file);
34456 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34457 &data, true);
34460 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34462 void
34463 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34465 fputs (integer_asm_op (size, FALSE), file);
34466 assemble_name (file, label);
34467 fputs ("-$", file);
34470 /* Output a symbol offset relative to the dbase for the current object.
34471 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34472 signed offsets.
34474 __gcc_unwind_dbase is embedded in all executables/libraries through
34475 libgcc/config/rs6000/crtdbase.S. */
34477 void
34478 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34480 fputs (integer_asm_op (size, FALSE), file);
34481 assemble_name (file, label);
34482 fputs("-__gcc_unwind_dbase", file);
34485 #ifdef HAVE_AS_TLS
34486 static void
34487 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34489 rtx symbol;
34490 int flags;
34491 const char *symname;
34493 default_encode_section_info (decl, rtl, first);
34495 /* Careful not to prod global register variables. */
34496 if (!MEM_P (rtl))
34497 return;
34498 symbol = XEXP (rtl, 0);
34499 if (GET_CODE (symbol) != SYMBOL_REF)
34500 return;
34502 flags = SYMBOL_REF_FLAGS (symbol);
34504 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34505 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34507 SYMBOL_REF_FLAGS (symbol) = flags;
34509 /* Append mapping class to extern decls. */
34510 symname = XSTR (symbol, 0);
34511 if (decl /* sync condition with assemble_external () */
34512 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34513 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34514 || TREE_CODE (decl) == FUNCTION_DECL)
34515 && symname[strlen (symname) - 1] != ']')
34517 char *newname = (char *) alloca (strlen (symname) + 5);
34518 strcpy (newname, symname);
34519 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34520 ? "[DS]" : "[UA]"));
34521 XSTR (symbol, 0) = ggc_strdup (newname);
34524 #endif /* HAVE_AS_TLS */
34525 #endif /* TARGET_XCOFF */
34527 void
34528 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34529 const char *name, const char *val)
34531 fputs ("\t.weak\t", stream);
34532 RS6000_OUTPUT_BASENAME (stream, name);
34533 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34534 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34536 if (TARGET_XCOFF)
34537 fputs ("[DS]", stream);
34538 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34539 if (TARGET_XCOFF)
34540 fputs (rs6000_xcoff_visibility (decl), stream);
34541 #endif
34542 fputs ("\n\t.weak\t.", stream);
34543 RS6000_OUTPUT_BASENAME (stream, name);
34545 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34546 if (TARGET_XCOFF)
34547 fputs (rs6000_xcoff_visibility (decl), stream);
34548 #endif
34549 fputc ('\n', stream);
34550 if (val)
34552 #ifdef ASM_OUTPUT_DEF
34553 ASM_OUTPUT_DEF (stream, name, val);
34554 #endif
34555 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34556 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34558 fputs ("\t.set\t.", stream);
34559 RS6000_OUTPUT_BASENAME (stream, name);
34560 fputs (",.", stream);
34561 RS6000_OUTPUT_BASENAME (stream, val);
34562 fputc ('\n', stream);
34568 /* Return true if INSN should not be copied. */
34570 static bool
34571 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34573 return recog_memoized (insn) >= 0
34574 && get_attr_cannot_copy (insn);
34577 /* Compute a (partial) cost for rtx X. Return true if the complete
34578 cost has been computed, and false if subexpressions should be
34579 scanned. In either case, *TOTAL contains the cost result. */
34581 static bool
34582 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34583 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34585 int code = GET_CODE (x);
34587 switch (code)
34589 /* On the RS/6000, if it is valid in the insn, it is free. */
34590 case CONST_INT:
34591 if (((outer_code == SET
34592 || outer_code == PLUS
34593 || outer_code == MINUS)
34594 && (satisfies_constraint_I (x)
34595 || satisfies_constraint_L (x)))
34596 || (outer_code == AND
34597 && (satisfies_constraint_K (x)
34598 || (mode == SImode
34599 ? satisfies_constraint_L (x)
34600 : satisfies_constraint_J (x))))
34601 || ((outer_code == IOR || outer_code == XOR)
34602 && (satisfies_constraint_K (x)
34603 || (mode == SImode
34604 ? satisfies_constraint_L (x)
34605 : satisfies_constraint_J (x))))
34606 || outer_code == ASHIFT
34607 || outer_code == ASHIFTRT
34608 || outer_code == LSHIFTRT
34609 || outer_code == ROTATE
34610 || outer_code == ROTATERT
34611 || outer_code == ZERO_EXTRACT
34612 || (outer_code == MULT
34613 && satisfies_constraint_I (x))
34614 || ((outer_code == DIV || outer_code == UDIV
34615 || outer_code == MOD || outer_code == UMOD)
34616 && exact_log2 (INTVAL (x)) >= 0)
34617 || (outer_code == COMPARE
34618 && (satisfies_constraint_I (x)
34619 || satisfies_constraint_K (x)))
34620 || ((outer_code == EQ || outer_code == NE)
34621 && (satisfies_constraint_I (x)
34622 || satisfies_constraint_K (x)
34623 || (mode == SImode
34624 ? satisfies_constraint_L (x)
34625 : satisfies_constraint_J (x))))
34626 || (outer_code == GTU
34627 && satisfies_constraint_I (x))
34628 || (outer_code == LTU
34629 && satisfies_constraint_P (x)))
34631 *total = 0;
34632 return true;
34634 else if ((outer_code == PLUS
34635 && reg_or_add_cint_operand (x, VOIDmode))
34636 || (outer_code == MINUS
34637 && reg_or_sub_cint_operand (x, VOIDmode))
34638 || ((outer_code == SET
34639 || outer_code == IOR
34640 || outer_code == XOR)
34641 && (INTVAL (x)
34642 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34644 *total = COSTS_N_INSNS (1);
34645 return true;
34647 /* FALLTHRU */
34649 case CONST_DOUBLE:
34650 case CONST_WIDE_INT:
34651 case CONST:
34652 case HIGH:
34653 case SYMBOL_REF:
34654 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34655 return true;
34657 case MEM:
34658 /* When optimizing for size, MEM should be slightly more expensive
34659 than generating an address, e.g., (plus (reg) (const)).
34660 L1 cache latency is about two instructions. */
34661 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34662 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34663 *total += COSTS_N_INSNS (100);
34664 return true;
34666 case LABEL_REF:
34667 *total = 0;
34668 return true;
34670 case PLUS:
34671 case MINUS:
34672 if (FLOAT_MODE_P (mode))
34673 *total = rs6000_cost->fp;
34674 else
34675 *total = COSTS_N_INSNS (1);
34676 return false;
34678 case MULT:
34679 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34680 && satisfies_constraint_I (XEXP (x, 1)))
34682 if (INTVAL (XEXP (x, 1)) >= -256
34683 && INTVAL (XEXP (x, 1)) <= 255)
34684 *total = rs6000_cost->mulsi_const9;
34685 else
34686 *total = rs6000_cost->mulsi_const;
34688 else if (mode == SFmode)
34689 *total = rs6000_cost->fp;
34690 else if (FLOAT_MODE_P (mode))
34691 *total = rs6000_cost->dmul;
34692 else if (mode == DImode)
34693 *total = rs6000_cost->muldi;
34694 else
34695 *total = rs6000_cost->mulsi;
34696 return false;
34698 case FMA:
34699 if (mode == SFmode)
34700 *total = rs6000_cost->fp;
34701 else
34702 *total = rs6000_cost->dmul;
34703 break;
34705 case DIV:
34706 case MOD:
34707 if (FLOAT_MODE_P (mode))
34709 *total = mode == DFmode ? rs6000_cost->ddiv
34710 : rs6000_cost->sdiv;
34711 return false;
34713 /* FALLTHRU */
34715 case UDIV:
34716 case UMOD:
34717 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34718 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34720 if (code == DIV || code == MOD)
34721 /* Shift, addze */
34722 *total = COSTS_N_INSNS (2);
34723 else
34724 /* Shift */
34725 *total = COSTS_N_INSNS (1);
34727 else
34729 if (GET_MODE (XEXP (x, 1)) == DImode)
34730 *total = rs6000_cost->divdi;
34731 else
34732 *total = rs6000_cost->divsi;
34734 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34735 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34736 *total += COSTS_N_INSNS (2);
34737 return false;
34739 case CTZ:
34740 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34741 return false;
34743 case FFS:
34744 *total = COSTS_N_INSNS (4);
34745 return false;
34747 case POPCOUNT:
34748 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34749 return false;
34751 case PARITY:
34752 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34753 return false;
34755 case NOT:
34756 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34757 *total = 0;
34758 else
34759 *total = COSTS_N_INSNS (1);
34760 return false;
34762 case AND:
34763 if (CONST_INT_P (XEXP (x, 1)))
34765 rtx left = XEXP (x, 0);
34766 rtx_code left_code = GET_CODE (left);
34768 /* rotate-and-mask: 1 insn. */
34769 if ((left_code == ROTATE
34770 || left_code == ASHIFT
34771 || left_code == LSHIFTRT)
34772 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34774 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34775 if (!CONST_INT_P (XEXP (left, 1)))
34776 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34777 *total += COSTS_N_INSNS (1);
34778 return true;
34781 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34782 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34783 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34784 || (val & 0xffff) == val
34785 || (val & 0xffff0000) == val
34786 || ((val & 0xffff) == 0 && mode == SImode))
34788 *total = rtx_cost (left, mode, AND, 0, speed);
34789 *total += COSTS_N_INSNS (1);
34790 return true;
34793 /* 2 insns. */
34794 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34796 *total = rtx_cost (left, mode, AND, 0, speed);
34797 *total += COSTS_N_INSNS (2);
34798 return true;
34802 *total = COSTS_N_INSNS (1);
34803 return false;
34805 case IOR:
34806 /* FIXME */
34807 *total = COSTS_N_INSNS (1);
34808 return true;
34810 case CLZ:
34811 case XOR:
34812 case ZERO_EXTRACT:
34813 *total = COSTS_N_INSNS (1);
34814 return false;
34816 case ASHIFT:
34817 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34818 the sign extend and shift separately within the insn. */
34819 if (TARGET_EXTSWSLI && mode == DImode
34820 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34821 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34823 *total = 0;
34824 return false;
34826 /* fall through */
34828 case ASHIFTRT:
34829 case LSHIFTRT:
34830 case ROTATE:
34831 case ROTATERT:
34832 /* Handle mul_highpart. */
34833 if (outer_code == TRUNCATE
34834 && GET_CODE (XEXP (x, 0)) == MULT)
34836 if (mode == DImode)
34837 *total = rs6000_cost->muldi;
34838 else
34839 *total = rs6000_cost->mulsi;
34840 return true;
34842 else if (outer_code == AND)
34843 *total = 0;
34844 else
34845 *total = COSTS_N_INSNS (1);
34846 return false;
34848 case SIGN_EXTEND:
34849 case ZERO_EXTEND:
34850 if (GET_CODE (XEXP (x, 0)) == MEM)
34851 *total = 0;
34852 else
34853 *total = COSTS_N_INSNS (1);
34854 return false;
34856 case COMPARE:
34857 case NEG:
34858 case ABS:
34859 if (!FLOAT_MODE_P (mode))
34861 *total = COSTS_N_INSNS (1);
34862 return false;
34864 /* FALLTHRU */
34866 case FLOAT:
34867 case UNSIGNED_FLOAT:
34868 case FIX:
34869 case UNSIGNED_FIX:
34870 case FLOAT_TRUNCATE:
34871 *total = rs6000_cost->fp;
34872 return false;
34874 case FLOAT_EXTEND:
34875 if (mode == DFmode)
34876 *total = rs6000_cost->sfdf_convert;
34877 else
34878 *total = rs6000_cost->fp;
34879 return false;
34881 case UNSPEC:
34882 switch (XINT (x, 1))
34884 case UNSPEC_FRSP:
34885 *total = rs6000_cost->fp;
34886 return true;
34888 default:
34889 break;
34891 break;
34893 case CALL:
34894 case IF_THEN_ELSE:
34895 if (!speed)
34897 *total = COSTS_N_INSNS (1);
34898 return true;
34900 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34902 *total = rs6000_cost->fp;
34903 return false;
34905 break;
34907 case NE:
34908 case EQ:
34909 case GTU:
34910 case LTU:
34911 /* Carry bit requires mode == Pmode.
34912 NEG or PLUS already counted so only add one. */
34913 if (mode == Pmode
34914 && (outer_code == NEG || outer_code == PLUS))
34916 *total = COSTS_N_INSNS (1);
34917 return true;
34919 /* FALLTHRU */
34921 case GT:
34922 case LT:
34923 case UNORDERED:
34924 if (outer_code == SET)
34926 if (XEXP (x, 1) == const0_rtx)
34928 *total = COSTS_N_INSNS (2);
34929 return true;
34931 else
34933 *total = COSTS_N_INSNS (3);
34934 return false;
34937 /* CC COMPARE. */
34938 if (outer_code == COMPARE)
34940 *total = 0;
34941 return true;
34943 break;
34945 default:
34946 break;
34949 return false;
34952 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34954 static bool
34955 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34956 int opno, int *total, bool speed)
34958 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34960 fprintf (stderr,
34961 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34962 "opno = %d, total = %d, speed = %s, x:\n",
34963 ret ? "complete" : "scan inner",
34964 GET_MODE_NAME (mode),
34965 GET_RTX_NAME (outer_code),
34966 opno,
34967 *total,
34968 speed ? "true" : "false");
34970 debug_rtx (x);
34972 return ret;
34975 static int
34976 rs6000_insn_cost (rtx_insn *insn, bool speed)
34978 if (recog_memoized (insn) < 0)
34979 return 0;
34981 if (!speed)
34982 return get_attr_length (insn);
34984 int cost = get_attr_cost (insn);
34985 if (cost > 0)
34986 return cost;
34988 int n = get_attr_length (insn) / 4;
34989 enum attr_type type = get_attr_type (insn);
34991 switch (type)
34993 case TYPE_LOAD:
34994 case TYPE_FPLOAD:
34995 case TYPE_VECLOAD:
34996 cost = COSTS_N_INSNS (n + 1);
34997 break;
34999 case TYPE_MUL:
35000 switch (get_attr_size (insn))
35002 case SIZE_8:
35003 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
35004 break;
35005 case SIZE_16:
35006 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
35007 break;
35008 case SIZE_32:
35009 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
35010 break;
35011 case SIZE_64:
35012 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
35013 break;
35014 default:
35015 gcc_unreachable ();
35017 break;
35018 case TYPE_DIV:
35019 switch (get_attr_size (insn))
35021 case SIZE_32:
35022 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
35023 break;
35024 case SIZE_64:
35025 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
35026 break;
35027 default:
35028 gcc_unreachable ();
35030 break;
35032 case TYPE_FP:
35033 cost = n * rs6000_cost->fp;
35034 break;
35035 case TYPE_DMUL:
35036 cost = n * rs6000_cost->dmul;
35037 break;
35038 case TYPE_SDIV:
35039 cost = n * rs6000_cost->sdiv;
35040 break;
35041 case TYPE_DDIV:
35042 cost = n * rs6000_cost->ddiv;
35043 break;
35045 case TYPE_SYNC:
35046 case TYPE_LOAD_L:
35047 case TYPE_MFCR:
35048 case TYPE_MFCRF:
35049 cost = COSTS_N_INSNS (n + 2);
35050 break;
35052 default:
35053 cost = COSTS_N_INSNS (n);
35056 return cost;
35059 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
35061 static int
35062 rs6000_debug_address_cost (rtx x, machine_mode mode,
35063 addr_space_t as, bool speed)
35065 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
35067 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
35068 ret, speed ? "true" : "false");
35069 debug_rtx (x);
35071 return ret;
35075 /* A C expression returning the cost of moving data from a register of class
35076 CLASS1 to one of CLASS2. */
35078 static int
35079 rs6000_register_move_cost (machine_mode mode,
35080 reg_class_t from, reg_class_t to)
35082 int ret;
35084 if (TARGET_DEBUG_COST)
35085 dbg_cost_ctrl++;
35087 /* Moves from/to GENERAL_REGS. */
35088 if (reg_classes_intersect_p (to, GENERAL_REGS)
35089 || reg_classes_intersect_p (from, GENERAL_REGS))
35091 reg_class_t rclass = from;
35093 if (! reg_classes_intersect_p (to, GENERAL_REGS))
35094 rclass = to;
35096 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
35097 ret = (rs6000_memory_move_cost (mode, rclass, false)
35098 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
35100 /* It's more expensive to move CR_REGS than CR0_REGS because of the
35101 shift. */
35102 else if (rclass == CR_REGS)
35103 ret = 4;
35105 /* For those processors that have slow LR/CTR moves, make them more
35106 expensive than memory in order to bias spills to memory. */
35107 else if ((rs6000_tune == PROCESSOR_POWER6
35108 || rs6000_tune == PROCESSOR_POWER7
35109 || rs6000_tune == PROCESSOR_POWER8
35110 || rs6000_tune == PROCESSOR_POWER9)
35111 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35112 ret = 6 * hard_regno_nregs (0, mode);
35114 else
35115 /* A move will cost one instruction per GPR moved. */
35116 ret = 2 * hard_regno_nregs (0, mode);
35119 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35120 else if (VECTOR_MEM_VSX_P (mode)
35121 && reg_classes_intersect_p (to, VSX_REGS)
35122 && reg_classes_intersect_p (from, VSX_REGS))
35123 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35125 /* Moving between two similar registers is just one instruction. */
35126 else if (reg_classes_intersect_p (to, from))
35127 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35129 /* Everything else has to go through GENERAL_REGS. */
35130 else
35131 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35132 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35134 if (TARGET_DEBUG_COST)
35136 if (dbg_cost_ctrl == 1)
35137 fprintf (stderr,
35138 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35139 ret, GET_MODE_NAME (mode), reg_class_names[from],
35140 reg_class_names[to]);
35141 dbg_cost_ctrl--;
35144 return ret;
35147 /* A C expression returning the cost of moving data of MODE from a register to
35148 or from memory. */
35150 static int
35151 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35152 bool in ATTRIBUTE_UNUSED)
35154 int ret;
35156 if (TARGET_DEBUG_COST)
35157 dbg_cost_ctrl++;
35159 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35160 ret = 4 * hard_regno_nregs (0, mode);
35161 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35162 || reg_classes_intersect_p (rclass, VSX_REGS)))
35163 ret = 4 * hard_regno_nregs (32, mode);
35164 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35165 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35166 else
35167 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35169 if (TARGET_DEBUG_COST)
35171 if (dbg_cost_ctrl == 1)
35172 fprintf (stderr,
35173 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35174 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35175 dbg_cost_ctrl--;
35178 return ret;
35181 /* Returns a code for a target-specific builtin that implements
35182 reciprocal of the function, or NULL_TREE if not available. */
35184 static tree
35185 rs6000_builtin_reciprocal (tree fndecl)
35187 switch (DECL_FUNCTION_CODE (fndecl))
35189 case VSX_BUILTIN_XVSQRTDP:
35190 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35191 return NULL_TREE;
35193 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35195 case VSX_BUILTIN_XVSQRTSP:
35196 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35197 return NULL_TREE;
35199 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35201 default:
35202 return NULL_TREE;
35206 /* Load up a constant. If the mode is a vector mode, splat the value across
35207 all of the vector elements. */
35209 static rtx
35210 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35212 rtx reg;
35214 if (mode == SFmode || mode == DFmode)
35216 rtx d = const_double_from_real_value (dconst, mode);
35217 reg = force_reg (mode, d);
35219 else if (mode == V4SFmode)
35221 rtx d = const_double_from_real_value (dconst, SFmode);
35222 rtvec v = gen_rtvec (4, d, d, d, d);
35223 reg = gen_reg_rtx (mode);
35224 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35226 else if (mode == V2DFmode)
35228 rtx d = const_double_from_real_value (dconst, DFmode);
35229 rtvec v = gen_rtvec (2, d, d);
35230 reg = gen_reg_rtx (mode);
35231 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35233 else
35234 gcc_unreachable ();
35236 return reg;
35239 /* Generate an FMA instruction. */
35241 static void
35242 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35244 machine_mode mode = GET_MODE (target);
35245 rtx dst;
35247 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35248 gcc_assert (dst != NULL);
35250 if (dst != target)
35251 emit_move_insn (target, dst);
35254 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35256 static void
35257 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35259 machine_mode mode = GET_MODE (dst);
35260 rtx r;
35262 /* This is a tad more complicated, since the fnma_optab is for
35263 a different expression: fma(-m1, m2, a), which is the same
35264 thing except in the case of signed zeros.
35266 Fortunately we know that if FMA is supported that FNMSUB is
35267 also supported in the ISA. Just expand it directly. */
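/* Illustrative note (not part of the original comment): FNMSUB computes
   dst = -(m1*m2 - a), while fnma_optab would compute a - m1*m2. When
   m1*m2 == a == x exactly, -(x - x) yields -0.0 whereas -x + x yields
   +0.0, which is the signed-zero difference noted above. */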
35269 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35271 r = gen_rtx_NEG (mode, a);
35272 r = gen_rtx_FMA (mode, m1, m2, r);
35273 r = gen_rtx_NEG (mode, r);
35274 emit_insn (gen_rtx_SET (dst, r));
35277 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35278 add a reg_note saying that this was a division. Support both scalar and
35279 vector divide. Assumes no trapping math and finite arguments. */
35281 void
35282 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35284 machine_mode mode = GET_MODE (dst);
35285 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35286 int i;
35288 /* Low precision estimates guarantee 5 bits of accuracy. High
35289 precision estimates guarantee 14 bits of accuracy. SFmode
35290 requires 23 bits of accuracy. DFmode requires 52 bits of
35291 accuracy. Each pass at least doubles the accuracy, leading
35292 to the following. */
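/* A worked sketch of why this holds (added for illustration): with
   e_i = 1 - d*x_i, the update x_(i+1) = x_i + e_i*x_i gives
   e_(i+1) = e_i*e_i, so the number of correct bits doubles per pass.
   A 5-bit estimate goes 5 -> 10 -> 20 -> 40, hence 3 passes for
   SFmode (23 bits) and 4 for DFmode (52 bits); a 14-bit estimate
   goes 14 -> 28 -> 56, hence 1 pass for SFmode and 2 for DFmode. */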
35293 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35294 if (mode == DFmode || mode == V2DFmode)
35295 passes++;
35297 enum insn_code code = optab_handler (smul_optab, mode);
35298 insn_gen_fn gen_mul = GEN_FCN (code);
35300 gcc_assert (code != CODE_FOR_nothing);
35302 one = rs6000_load_constant_and_splat (mode, dconst1);
35304 /* x0 = 1./d estimate */
35305 x0 = gen_reg_rtx (mode);
35306 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35307 UNSPEC_FRES)));
35309 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35310 if (passes > 1) {
35312 /* e0 = 1. - d * x0 */
35313 e0 = gen_reg_rtx (mode);
35314 rs6000_emit_nmsub (e0, d, x0, one);
35316 /* x1 = x0 + e0 * x0 */
35317 x1 = gen_reg_rtx (mode);
35318 rs6000_emit_madd (x1, e0, x0, x0);
35320 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35321 ++i, xprev = xnext, eprev = enext) {
35323 /* enext = eprev * eprev */
35324 enext = gen_reg_rtx (mode);
35325 emit_insn (gen_mul (enext, eprev, eprev));
35327 /* xnext = xprev + enext * xprev */
35328 xnext = gen_reg_rtx (mode);
35329 rs6000_emit_madd (xnext, enext, xprev, xprev);
35332 } else
35333 xprev = x0;
35335 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35337 /* u = n * xprev */
35338 u = gen_reg_rtx (mode);
35339 emit_insn (gen_mul (u, n, xprev));
35341 /* v = n - (d * u) */
35342 v = gen_reg_rtx (mode);
35343 rs6000_emit_nmsub (v, d, u, n);
35345 /* dst = (v * xprev) + u */
35346 rs6000_emit_madd (dst, v, xprev, u);
35348 if (note_p)
35349 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35352 /* Goldschmidt's Algorithm for single/double-precision floating point
35353 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35355 void
35356 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35358 machine_mode mode = GET_MODE (src);
35359 rtx e = gen_reg_rtx (mode);
35360 rtx g = gen_reg_rtx (mode);
35361 rtx h = gen_reg_rtx (mode);
35363 /* Low precision estimates guarantee 5 bits of accuracy. High
35364 precision estimates guarantee 14 bits of accuracy. SFmode
35365 requires 23 bits of accuracy. DFmode requires 52 bits of
35366 accuracy. Each pass at least doubles the accuracy, leading
35367 to the following. */
35368 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35369 if (mode == DFmode || mode == V2DFmode)
35370 passes++;
35372 int i;
35373 rtx mhalf;
35374 enum insn_code code = optab_handler (smul_optab, mode);
35375 insn_gen_fn gen_mul = GEN_FCN (code);
35377 gcc_assert (code != CODE_FOR_nothing);
35379 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35381 /* e = rsqrt estimate */
35382 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35383 UNSPEC_RSQRT)));
35385 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35386 if (!recip)
35388 rtx zero = force_reg (mode, CONST0_RTX (mode));
35390 if (mode == SFmode)
35392 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35393 e, zero, mode, 0);
35394 if (target != e)
35395 emit_move_insn (e, target);
35397 else
35399 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35400 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35404 /* g = sqrt estimate. */
35405 emit_insn (gen_mul (g, e, src));
35406 /* h = 1/(2*sqrt) estimate. */
35407 emit_insn (gen_mul (h, e, mhalf));
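/* Illustrative note (not part of the original comment): each Goldschmidt
   pass below computes t = 1/2 - g*h with FNMSUB and refines g += t*g
   and h += t*h. At the fixed point 2*g*h == 1, so g converges to
   sqrt(src) and h to 1/(2*sqrt(src)), with the error roughly squaring
   on every pass. */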
35409 if (recip)
35411 if (passes == 1)
35413 rtx t = gen_reg_rtx (mode);
35414 rs6000_emit_nmsub (t, g, h, mhalf);
35415 /* Apply correction directly to 1/rsqrt estimate. */
35416 rs6000_emit_madd (dst, e, t, e);
35418 else
35420 for (i = 0; i < passes; i++)
35422 rtx t1 = gen_reg_rtx (mode);
35423 rtx g1 = gen_reg_rtx (mode);
35424 rtx h1 = gen_reg_rtx (mode);
35426 rs6000_emit_nmsub (t1, g, h, mhalf);
35427 rs6000_emit_madd (g1, g, t1, g);
35428 rs6000_emit_madd (h1, h, t1, h);
35430 g = g1;
35431 h = h1;
35433 /* Multiply by 2 for 1/rsqrt. */
35434 emit_insn (gen_add3_insn (dst, h, h));
35437 else
35439 rtx t = gen_reg_rtx (mode);
35440 rs6000_emit_nmsub (t, g, h, mhalf);
35441 rs6000_emit_madd (dst, g, t, g);
35444 return;
35447 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35448 (Power7) targets. DST is the target, and SRC is the argument operand. */
35450 void
35451 rs6000_emit_popcount (rtx dst, rtx src)
35453 machine_mode mode = GET_MODE (dst);
35454 rtx tmp1, tmp2;
35456 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35457 if (TARGET_POPCNTD)
35459 if (mode == SImode)
35460 emit_insn (gen_popcntdsi2 (dst, src));
35461 else
35462 emit_insn (gen_popcntddi2 (dst, src));
35463 return;
35466 tmp1 = gen_reg_rtx (mode);
35468 if (mode == SImode)
35470 emit_insn (gen_popcntbsi2 (tmp1, src));
35471 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35472 NULL_RTX, 0);
35473 tmp2 = force_reg (SImode, tmp2);
35474 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35476 else
35478 emit_insn (gen_popcntbdi2 (tmp1, src));
35479 tmp2 = expand_mult (DImode, tmp1,
35480 GEN_INT ((HOST_WIDE_INT)
35481 0x01010101 << 32 | 0x01010101),
35482 NULL_RTX, 0);
35483 tmp2 = force_reg (DImode, tmp2);
35484 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
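/* Sketch of the multiply trick above (added for clarity): popcntb
   leaves the population count of each byte in that byte. Multiplying
   by 0x01010101 (or its 64-bit analogue) accumulates the sum of all
   the byte counts into the most significant byte, and the final right
   shift by 24 (or 56) extracts that byte. */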
35489 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35490 target, and SRC is the argument operand. */
35492 void
35493 rs6000_emit_parity (rtx dst, rtx src)
35495 machine_mode mode = GET_MODE (dst);
35496 rtx tmp;
35498 tmp = gen_reg_rtx (mode);
35500 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35501 if (TARGET_CMPB)
35503 if (mode == SImode)
35505 emit_insn (gen_popcntbsi2 (tmp, src));
35506 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35508 else
35510 emit_insn (gen_popcntbdi2 (tmp, src));
35511 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35513 return;
35516 if (mode == SImode)
35518 /* Is mult+shift >= shift+xor+shift+xor? */
35519 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35521 rtx tmp1, tmp2, tmp3, tmp4;
35523 tmp1 = gen_reg_rtx (SImode);
35524 emit_insn (gen_popcntbsi2 (tmp1, src));
35526 tmp2 = gen_reg_rtx (SImode);
35527 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35528 tmp3 = gen_reg_rtx (SImode);
35529 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35531 tmp4 = gen_reg_rtx (SImode);
35532 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35533 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35535 else
35536 rs6000_emit_popcount (tmp, src);
35537 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35539 else
35541 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35542 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35544 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35546 tmp1 = gen_reg_rtx (DImode);
35547 emit_insn (gen_popcntbdi2 (tmp1, src));
35549 tmp2 = gen_reg_rtx (DImode);
35550 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35551 tmp3 = gen_reg_rtx (DImode);
35552 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35554 tmp4 = gen_reg_rtx (DImode);
35555 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35556 tmp5 = gen_reg_rtx (DImode);
35557 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35559 tmp6 = gen_reg_rtx (DImode);
35560 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35561 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35563 else
35564 rs6000_emit_popcount (tmp, src);
35565 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
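/* Sketch of the shift/xor fallback above (added for clarity): the
   parity of the word equals the parity of the popcntb byte counts,
   so xor-folding the upper half onto the lower half repeatedly
   gathers the parity of every byte into the low byte; the final
   AND with 1 extracts it. Each folding step costs a shift plus an
   xor, hence the comparison against the multiply cost. */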
35569 /* Expand an Altivec constant permutation for little endian mode.
35570 OP0 and OP1 are the input vectors and TARGET is the output vector.
35571 SEL specifies the constant permutation vector.
35573 There are two issues: First, the two input operands must be
35574 swapped so that together they form a double-wide array in LE
35575 order. Second, the vperm instruction has surprising behavior
35576 in LE mode: it interprets the elements of the source vectors
35577 in BE mode ("left to right") and interprets the elements of
35578 the destination vector in LE mode ("right to left"). To
35579 correct for this, we must subtract each element of the permute
35580 control vector from 31.
35582 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35583 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35584 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35585 serve as the permute control vector. Then, in BE mode,
35587 vperm 9,10,11,12
35589 places the desired result in vr9. However, in LE mode the
35590 vector contents will be
35592 vr10 = 00000003 00000002 00000001 00000000
35593 vr11 = 00000007 00000006 00000005 00000004
35595 The result of the vperm using the same permute control vector is
35597 vr9 = 05000000 07000000 01000000 03000000
35599 That is, the leftmost 4 bytes of vr10 are interpreted as the
35600 source for the rightmost 4 bytes of vr9, and so on.
35602 If we change the permute control vector to
35604 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35606 and issue
35608 vperm 9,11,10,12
35610 we get the desired
35612 vr9 = 00000006 00000004 00000002 00000000. */
35614 static void
35615 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35616 const vec_perm_indices &sel)
35618 unsigned int i;
35619 rtx perm[16];
35620 rtx constv, unspec;
35622 /* Unpack and adjust the constant selector. */
35623 for (i = 0; i < 16; ++i)
35625 unsigned int elt = 31 - (sel[i] & 31);
35626 perm[i] = GEN_INT (elt);
35629 /* Expand to a permute, swapping the inputs and using the
35630 adjusted selector. */
35631 if (!REG_P (op0))
35632 op0 = force_reg (V16QImode, op0);
35633 if (!REG_P (op1))
35634 op1 = force_reg (V16QImode, op1);
35636 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35637 constv = force_reg (V16QImode, constv);
35638 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35639 UNSPEC_VPERM);
35640 if (!REG_P (target))
35642 rtx tmp = gen_reg_rtx (V16QImode);
35643 emit_move_insn (tmp, unspec);
35644 unspec = tmp;
35647 emit_move_insn (target, unspec);
35650 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35651 permute control vector. But here it's not a constant, so we must
35652 generate a vector NAND or NOR to do the adjustment. */
35654 void
35655 altivec_expand_vec_perm_le (rtx operands[4])
35657 rtx notx, iorx, unspec;
35658 rtx target = operands[0];
35659 rtx op0 = operands[1];
35660 rtx op1 = operands[2];
35661 rtx sel = operands[3];
35662 rtx tmp = target;
35663 rtx norreg = gen_reg_rtx (V16QImode);
35664 machine_mode mode = GET_MODE (target);
35666 /* Get everything in regs so the pattern matches. */
35667 if (!REG_P (op0))
35668 op0 = force_reg (mode, op0);
35669 if (!REG_P (op1))
35670 op1 = force_reg (mode, op1);
35671 if (!REG_P (sel))
35672 sel = force_reg (V16QImode, sel);
35673 if (!REG_P (target))
35674 tmp = gen_reg_rtx (mode);
35676 if (TARGET_P9_VECTOR)
35678 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35679 UNSPEC_VPERMR);
35681 else
35683 /* Invert the selector with a VNAND if available, else a VNOR.
35684 The VNAND is preferred for future fusion opportunities. */
35685 notx = gen_rtx_NOT (V16QImode, sel);
35686 iorx = (TARGET_P8_VECTOR
35687 ? gen_rtx_IOR (V16QImode, notx, notx)
35688 : gen_rtx_AND (V16QImode, notx, notx));
35689 emit_insn (gen_rtx_SET (norreg, iorx));
35691 /* Permute with operands reversed and adjusted selector. */
35692 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35693 UNSPEC_VPERM);
35696 /* Copy into target, possibly by way of a register. */
35697 if (!REG_P (target))
35699 emit_move_insn (tmp, unspec);
35700 unspec = tmp;
35703 emit_move_insn (target, unspec);
35706 /* Expand an Altivec constant permutation. Return true if we match
35707 an efficient implementation; false to fall back to VPERM.
35709 OP0 and OP1 are the input vectors and TARGET is the output vector.
35710 SEL specifies the constant permutation vector. */
35712 static bool
35713 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35714 const vec_perm_indices &sel)
35716 struct altivec_perm_insn {
35717 HOST_WIDE_INT mask;
35718 enum insn_code impl;
35719 unsigned char perm[16];
35721 static const struct altivec_perm_insn patterns[] = {
35722 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35723 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35724 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35725 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35726 { OPTION_MASK_ALTIVEC,
35727 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35728 : CODE_FOR_altivec_vmrglb_direct),
35729 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35730 { OPTION_MASK_ALTIVEC,
35731 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35732 : CODE_FOR_altivec_vmrglh_direct),
35733 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35734 { OPTION_MASK_ALTIVEC,
35735 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35736 : CODE_FOR_altivec_vmrglw_direct),
35737 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35738 { OPTION_MASK_ALTIVEC,
35739 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35740 : CODE_FOR_altivec_vmrghb_direct),
35741 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35742 { OPTION_MASK_ALTIVEC,
35743 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35744 : CODE_FOR_altivec_vmrghh_direct),
35745 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35746 { OPTION_MASK_ALTIVEC,
35747 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35748 : CODE_FOR_altivec_vmrghw_direct),
35749 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35750 { OPTION_MASK_P8_VECTOR,
35751 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35752 : CODE_FOR_p8_vmrgow_v4sf_direct),
35753 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35754 { OPTION_MASK_P8_VECTOR,
35755 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35756 : CODE_FOR_p8_vmrgew_v4sf_direct),
35757 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35760 unsigned int i, j, elt, which;
35761 unsigned char perm[16];
35762 rtx x;
35763 bool one_vec;
35765 /* Unpack the constant selector. */
35766 for (i = which = 0; i < 16; ++i)
35768 elt = sel[i] & 31;
35769 which |= (elt < 16 ? 1 : 2);
35770 perm[i] = elt;
35773 /* Simplify the constant selector based on operands. */
35774 switch (which)
35776 default:
35777 gcc_unreachable ();
35779 case 3:
35780 one_vec = false;
35781 if (!rtx_equal_p (op0, op1))
35782 break;
35783 /* FALLTHRU */
35785 case 2:
35786 for (i = 0; i < 16; ++i)
35787 perm[i] &= 15;
35788 op0 = op1;
35789 one_vec = true;
35790 break;
35792 case 1:
35793 op1 = op0;
35794 one_vec = true;
35795 break;
35798 /* Look for splat patterns. */
35799 if (one_vec)
35801 elt = perm[0];
35803 for (i = 0; i < 16; ++i)
35804 if (perm[i] != elt)
35805 break;
35806 if (i == 16)
35808 if (!BYTES_BIG_ENDIAN)
35809 elt = 15 - elt;
35810 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35811 return true;
35814 if (elt % 2 == 0)
35816 for (i = 0; i < 16; i += 2)
35817 if (perm[i] != elt || perm[i + 1] != elt + 1)
35818 break;
35819 if (i == 16)
35821 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35822 x = gen_reg_rtx (V8HImode);
35823 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35824 GEN_INT (field)));
35825 emit_move_insn (target, gen_lowpart (V16QImode, x));
35826 return true;
35830 if (elt % 4 == 0)
35832 for (i = 0; i < 16; i += 4)
35833 if (perm[i] != elt
35834 || perm[i + 1] != elt + 1
35835 || perm[i + 2] != elt + 2
35836 || perm[i + 3] != elt + 3)
35837 break;
35838 if (i == 16)
35840 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35841 x = gen_reg_rtx (V4SImode);
35842 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35843 GEN_INT (field)));
35844 emit_move_insn (target, gen_lowpart (V16QImode, x));
35845 return true;
35850 /* Look for merge and pack patterns. */
35851 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35853 bool swapped;
35855 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35856 continue;
35858 elt = patterns[j].perm[0];
35859 if (perm[0] == elt)
35860 swapped = false;
35861 else if (perm[0] == elt + 16)
35862 swapped = true;
35863 else
35864 continue;
35865 for (i = 1; i < 16; ++i)
35867 elt = patterns[j].perm[i];
35868 if (swapped)
35869 elt = (elt >= 16 ? elt - 16 : elt + 16);
35870 else if (one_vec && elt >= 16)
35871 elt -= 16;
35872 if (perm[i] != elt)
35873 break;
35875 if (i == 16)
35877 enum insn_code icode = patterns[j].impl;
35878 machine_mode omode = insn_data[icode].operand[0].mode;
35879 machine_mode imode = insn_data[icode].operand[1].mode;
35881 /* For little-endian, don't use vpkuwum and vpkuhum if the
35882 underlying vector type is not V4SI and V8HI, respectively.
35883 For example, using vpkuwum with a V8HI picks up the even
35884 halfwords (BE numbering) when the even halfwords (LE
35885 numbering) are what we need. */
35886 if (!BYTES_BIG_ENDIAN
35887 && icode == CODE_FOR_altivec_vpkuwum_direct
35888 && ((GET_CODE (op0) == REG
35889 && GET_MODE (op0) != V4SImode)
35890 || (GET_CODE (op0) == SUBREG
35891 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35892 continue;
35893 if (!BYTES_BIG_ENDIAN
35894 && icode == CODE_FOR_altivec_vpkuhum_direct
35895 && ((GET_CODE (op0) == REG
35896 && GET_MODE (op0) != V8HImode)
35897 || (GET_CODE (op0) == SUBREG
35898 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35899 continue;
35901 /* For little-endian, the two input operands must be swapped
35902 (or swapped back) to ensure proper right-to-left numbering
35903 from 0 to 2N-1. */
35904 if (swapped ^ !BYTES_BIG_ENDIAN)
35905 std::swap (op0, op1);
35906 if (imode != V16QImode)
35908 op0 = gen_lowpart (imode, op0);
35909 op1 = gen_lowpart (imode, op1);
35911 if (omode == V16QImode)
35912 x = target;
35913 else
35914 x = gen_reg_rtx (omode);
35915 emit_insn (GEN_FCN (icode) (x, op0, op1));
35916 if (omode != V16QImode)
35917 emit_move_insn (target, gen_lowpart (V16QImode, x));
35918 return true;
35922 if (!BYTES_BIG_ENDIAN)
35924 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35925 return true;
35928 return false;
35931 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35932 Return true if we match an efficient implementation. */
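/* Note added for clarity (inferred from the code below): PERM0 and
   PERM1 are 2-bit selectors for the two elements of the result;
   bit 1 picks the operand (0 = OP0, 1 = OP1) and bit 0 picks the
   doubleword within that operand. */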
35934 static bool
35935 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35936 unsigned char perm0, unsigned char perm1)
35938 rtx x;
35940 /* If both selectors come from the same operand, fold to single op. */
35941 if ((perm0 & 2) == (perm1 & 2))
35943 if (perm0 & 2)
35944 op0 = op1;
35945 else
35946 op1 = op0;
35948 /* If both operands are equal, fold to simpler permutation. */
35949 if (rtx_equal_p (op0, op1))
35951 perm0 = perm0 & 1;
35952 perm1 = (perm1 & 1) + 2;
35954 /* If the first selector comes from the second operand, swap. */
35955 else if (perm0 & 2)
35957 if (perm1 & 2)
35958 return false;
35959 perm0 -= 2;
35960 perm1 += 2;
35961 std::swap (op0, op1);
35963 /* If the second selector does not come from the second operand, fail. */
35964 else if ((perm1 & 2) == 0)
35965 return false;
35967 /* Success! */
35968 if (target != NULL)
35970 machine_mode vmode, dmode;
35971 rtvec v;
35973 vmode = GET_MODE (target);
35974 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35975 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35976 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35977 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35978 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35979 emit_insn (gen_rtx_SET (target, x));
35981 return true;
35984 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35986 static bool
35987 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35988 rtx op1, const vec_perm_indices &sel)
35990 bool testing_p = !target;
35992 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35993 if (TARGET_ALTIVEC && testing_p)
35994 return true;
35996 /* Check for ps_merge* or xxpermdi insns. */
35997 if ((vmode == V2SFmode && TARGET_PAIRED_FLOAT)
35998 || ((vmode == V2DFmode || vmode == V2DImode)
35999 && VECTOR_MEM_VSX_P (vmode)))
36001 if (testing_p)
36003 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
36004 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
36006 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
36007 return true;
36010 if (TARGET_ALTIVEC)
36012 /* Force the target-independent code to lower to V16QImode. */
36013 if (vmode != V16QImode)
36014 return false;
36015 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
36016 return true;
36019 return false;
36022 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
36023 OP0 and OP1 are the input vectors and TARGET is the output vector.
36024 PERM specifies the constant permutation vector. */
36026 static void
36027 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
36028 machine_mode vmode, const vec_perm_builder &perm)
36030 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
36031 if (x != target)
36032 emit_move_insn (target, x);
36035 /* Expand an extract even operation. */
36037 void
36038 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
36040 machine_mode vmode = GET_MODE (target);
36041 unsigned i, nelt = GET_MODE_NUNITS (vmode);
36042 vec_perm_builder perm (nelt, nelt, 1);
36044 for (i = 0; i < nelt; i++)
36045 perm.quick_push (i * 2);
36047 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
36050 /* Expand a vector interleave operation. */
36052 void
36053 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
36055 machine_mode vmode = GET_MODE (target);
36056 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
36057 vec_perm_builder perm (nelt, nelt, 1);
36059 high = (highp ? 0 : nelt / 2);
36060 for (i = 0; i < nelt / 2; i++)
36062 perm.quick_push (i + high);
36063 perm.quick_push (i + nelt + high);
36066 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
36069 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
36070 void
36071 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
36073 HOST_WIDE_INT hwi_scale (scale);
36074 REAL_VALUE_TYPE r_pow;
36075 rtvec v = rtvec_alloc (2);
36076 rtx elt;
36077 rtx scale_vec = gen_reg_rtx (V2DFmode);
36078 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
36079 elt = const_double_from_real_value (r_pow, DFmode);
36080 RTVEC_ELT (v, 0) = elt;
36081 RTVEC_ELT (v, 1) = elt;
36082 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
36083 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
36086 /* Return an RTX representing where to find the function value of a
36087 function returning MODE. */
36088 static rtx
36089 rs6000_complex_function_value (machine_mode mode)
36091 unsigned int regno;
36092 rtx r1, r2;
36093 machine_mode inner = GET_MODE_INNER (mode);
36094 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
36096 if (TARGET_FLOAT128_TYPE
36097 && (mode == KCmode
36098 || (mode == TCmode && TARGET_IEEEQUAD)))
36099 regno = ALTIVEC_ARG_RETURN;
36101 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36102 regno = FP_ARG_RETURN;
36104 else
36106 regno = GP_ARG_RETURN;
36108 /* 32-bit is OK since it'll go in r3/r4. */
36109 if (TARGET_32BIT && inner_bytes >= 4)
36110 return gen_rtx_REG (mode, regno);
36113 if (inner_bytes >= 8)
36114 return gen_rtx_REG (mode, regno);
36116 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36117 const0_rtx);
36118 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36119 GEN_INT (inner_bytes));
36120 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36123 /* Return an rtx describing a return value of MODE as a PARALLEL
36124 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36125 stride REG_STRIDE. */
36127 static rtx
36128 rs6000_parallel_return (machine_mode mode,
36129 int n_elts, machine_mode elt_mode,
36130 unsigned int regno, unsigned int reg_stride)
36132 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36134 int i;
36135 for (i = 0; i < n_elts; i++)
36137 rtx r = gen_rtx_REG (elt_mode, regno);
36138 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36139 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36140 regno += reg_stride;
36143 return par;
36146 /* Target hook for TARGET_FUNCTION_VALUE.
36148 An integer value is in r3 and a floating-point value is in fp1,
36149 unless -msoft-float. */
36151 static rtx
36152 rs6000_function_value (const_tree valtype,
36153 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36154 bool outgoing ATTRIBUTE_UNUSED)
36156 machine_mode mode;
36157 unsigned int regno;
36158 machine_mode elt_mode;
36159 int n_elts;
36161 /* Special handling for structs in darwin64. */
36162 if (TARGET_MACHO
36163 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36165 CUMULATIVE_ARGS valcum;
36166 rtx valret;
36168 valcum.words = 0;
36169 valcum.fregno = FP_ARG_MIN_REG;
36170 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36171 /* Do a trial code generation as if this were going to be passed as
36172 an argument; if any part goes in memory, we return NULL. */
36173 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36174 if (valret)
36175 return valret;
36176 /* Otherwise fall through to standard ABI rules. */
36179 mode = TYPE_MODE (valtype);
36181 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36182 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36184 int first_reg, n_regs;
36186 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36188 /* _Decimal128 must use even/odd register pairs. */
36189 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36190 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36192 else
36194 first_reg = ALTIVEC_ARG_RETURN;
36195 n_regs = 1;
36198 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36201 /* Some return value types need to be split in the 32-bit -mpowerpc64 ABI. */
36202 if (TARGET_32BIT && TARGET_POWERPC64)
36203 switch (mode)
36205 default:
36206 break;
36207 case E_DImode:
36208 case E_SCmode:
36209 case E_DCmode:
36210 case E_TCmode:
36211 int count = GET_MODE_SIZE (mode) / 4;
36212 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36215 if ((INTEGRAL_TYPE_P (valtype)
36216 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36217 || POINTER_TYPE_P (valtype))
36218 mode = TARGET_32BIT ? SImode : DImode;
36220 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36221 /* _Decimal128 must use an even/odd register pair. */
36222 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36223 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36224 && !FLOAT128_VECTOR_P (mode)
36225 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
36226 regno = FP_ARG_RETURN;
36227 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36228 && targetm.calls.split_complex_arg)
36229 return rs6000_complex_function_value (mode);
36230 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36231 return register is used in both cases, and we won't see V2DImode/V2DFmode
36232 for pure altivec, combine the two cases. */
36233 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36234 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36235 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36236 regno = ALTIVEC_ARG_RETURN;
36237 else
36238 regno = GP_ARG_RETURN;
36240 return gen_rtx_REG (mode, regno);
36243 /* Define how to find the value returned by a library function
36244 assuming the value has mode MODE. */
36245 rtx
36246 rs6000_libcall_value (machine_mode mode)
36248 unsigned int regno;
36250 /* A long long return value needs to be split in the 32-bit -mpowerpc64 ABI. */
36251 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36252 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36254 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36255 /* _Decimal128 must use an even/odd register pair. */
36256 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36257 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
36258 && TARGET_HARD_FLOAT
36259 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
36260 regno = FP_ARG_RETURN;
36261 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36262 return register is used in both cases, and we won't see V2DImode/V2DFmode
36263 for pure altivec, combine the two cases. */
36264 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36265 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36266 regno = ALTIVEC_ARG_RETURN;
36267 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36268 return rs6000_complex_function_value (mode);
36269 else
36270 regno = GP_ARG_RETURN;
36272 return gen_rtx_REG (mode, regno);
36275 /* Compute register pressure classes. We implement the target hook to avoid
36276 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36277 lead to incorrect estimates of the number of available registers and therefore
36278 increased register pressure/spill. */
36279 static int
36280 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36282 int n;
36284 n = 0;
36285 pressure_classes[n++] = GENERAL_REGS;
36286 if (TARGET_VSX)
36287 pressure_classes[n++] = VSX_REGS;
36288 else
36290 if (TARGET_ALTIVEC)
36291 pressure_classes[n++] = ALTIVEC_REGS;
36292 if (TARGET_HARD_FLOAT)
36293 pressure_classes[n++] = FLOAT_REGS;
36295 pressure_classes[n++] = CR_REGS;
36296 pressure_classes[n++] = SPECIAL_REGS;
36298 return n;
36301 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36302 Frame pointer elimination is automatically handled.
36304 For the RS/6000, if frame pointer elimination is being done, we would like
36305 to convert ap into fp, not sp.
36307 We need r30 if -mminimal-toc was specified and there are constant pool
36308 references. */
36310 static bool
36311 rs6000_can_eliminate (const int from, const int to)
36313 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36314 ? ! frame_pointer_needed
36315 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36316 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36317 || constant_pool_empty_p ()
36318 : true);
36321 /* Define the offset between two registers, FROM to be eliminated and its
36322 replacement TO, at the start of a routine. */
36323 HOST_WIDE_INT
36324 rs6000_initial_elimination_offset (int from, int to)
36326 rs6000_stack_t *info = rs6000_stack_info ();
36327 HOST_WIDE_INT offset;
36329 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36330 offset = info->push_p ? 0 : -info->total_size;
36331 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36333 offset = info->push_p ? 0 : -info->total_size;
36334 if (FRAME_GROWS_DOWNWARD)
36335 offset += info->fixed_size + info->vars_size + info->parm_size;
36337 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36338 offset = FRAME_GROWS_DOWNWARD
36339 ? info->fixed_size + info->vars_size + info->parm_size
36340 : 0;
36341 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36342 offset = info->total_size;
36343 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36344 offset = info->push_p ? info->total_size : 0;
36345 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36346 offset = 0;
36347 else
36348 gcc_unreachable ();
36350 return offset;
36353 /* Fill in sizes of registers used by unwinder. */
36355 static void
36356 rs6000_init_dwarf_reg_sizes_extra (tree address)
36358 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36360 int i;
36361 machine_mode mode = TYPE_MODE (char_type_node);
36362 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36363 rtx mem = gen_rtx_MEM (BLKmode, addr);
36364 rtx value = gen_int_mode (16, mode);
36366 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36367 The unwinder still needs to know the size of Altivec registers. */
36369 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36371 int column = DWARF_REG_TO_UNWIND_COLUMN
36372 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36373 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36375 emit_move_insn (adjust_address (mem, mode, offset), value);
36380 /* Map internal gcc register numbers to debug format register numbers.
36381 FORMAT specifies the type of debug register number to use:
36382 0 -- debug information, except for frame-related sections
36383 1 -- DWARF .debug_frame section
36384 2 -- DWARF .eh_frame section */
36386 unsigned int
36387 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36389 /* Except for the above, we use the internal number for non-DWARF
36390 debug information, and also for .eh_frame. */
36391 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36392 return regno;
36394 /* On some platforms, we use the standard DWARF register
36395 numbering for .debug_info and .debug_frame. */
36396 #ifdef RS6000_USE_DWARF_NUMBERING
36397 if (regno <= 63)
36398 return regno;
36399 if (regno == LR_REGNO)
36400 return 108;
36401 if (regno == CTR_REGNO)
36402 return 109;
36403 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36404 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36405 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36406 to the DWARF reg for CR. */
36407 if (format == 1 && regno == CR2_REGNO)
36408 return 64;
36409 if (CR_REGNO_P (regno))
36410 return regno - CR0_REGNO + 86;
36411 if (regno == CA_REGNO)
36412 return 101; /* XER */
36413 if (ALTIVEC_REGNO_P (regno))
36414 return regno - FIRST_ALTIVEC_REGNO + 1124;
36415 if (regno == VRSAVE_REGNO)
36416 return 356;
36417 if (regno == VSCR_REGNO)
36418 return 67;
36419 #endif
36420 return regno;
36423 /* target hook eh_return_filter_mode */
36424 static scalar_int_mode
36425 rs6000_eh_return_filter_mode (void)
36427 return TARGET_32BIT ? SImode : word_mode;
36430 /* Target hook for scalar_mode_supported_p. */
36431 static bool
36432 rs6000_scalar_mode_supported_p (scalar_mode mode)
36434 /* -m32 does not support TImode. This is the default, from
36435 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36436 same ABI as for -m32. But default_scalar_mode_supported_p allows
36437 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36438 for -mpowerpc64. */
36439 if (TARGET_32BIT && mode == TImode)
36440 return false;
36442 if (DECIMAL_FLOAT_MODE_P (mode))
36443 return default_decimal_float_supported_p ();
36444 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36445 return true;
36446 else
36447 return default_scalar_mode_supported_p (mode);
36450 /* Target hook for vector_mode_supported_p. */
36451 static bool
36452 rs6000_vector_mode_supported_p (machine_mode mode)
36455 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36456 return true;
36458 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36459 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36460 double-double. */
36461 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36462 return true;
36464 else
36465 return false;
36468 /* Target hook for floatn_mode. */
36469 static opt_scalar_float_mode
36470 rs6000_floatn_mode (int n, bool extended)
36472 if (extended)
36474 switch (n)
36476 case 32:
36477 return DFmode;
36479 case 64:
36480 if (TARGET_FLOAT128_TYPE)
36481 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36482 else
36483 return opt_scalar_float_mode ();
36485 case 128:
36486 return opt_scalar_float_mode ();
36488 default:
36489 /* Those are the only valid _FloatNx types. */
36490 gcc_unreachable ();
36493 else
36495 switch (n)
36497 case 32:
36498 return SFmode;
36500 case 64:
36501 return DFmode;
36503 case 128:
36504 if (TARGET_FLOAT128_TYPE)
36505 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36506 else
36507 return opt_scalar_float_mode ();
36509 default:
36510 return opt_scalar_float_mode ();
36516 /* Target hook for c_mode_for_suffix. */
36517 static machine_mode
36518 rs6000_c_mode_for_suffix (char suffix)
36520 if (TARGET_FLOAT128_TYPE)
36522 if (suffix == 'q' || suffix == 'Q')
36523 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36525 /* At the moment, we are not defining a suffix for IBM extended double.
36526 If/when the default for -mabi=ieeelongdouble is changed, and we want
36527 to support __ibm128 constants in legacy library code, we may need to
36528 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36529 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36530 __float80 constants. */
36533 return VOIDmode;
36536 /* Target hook for invalid_arg_for_unprototyped_fn. */
36537 static const char *
36538 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36540 return (!rs6000_darwin64_abi
36541 && typelist == 0
36542 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36543 && (funcdecl == NULL_TREE
36544 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36545 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36546 ? N_("AltiVec argument passed to unprototyped function")
36547 : NULL;
36550 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36551 setup by using __stack_chk_fail_local hidden function instead of
36552 calling __stack_chk_fail directly. Otherwise it is better to call
36553 __stack_chk_fail directly. */
36555 static tree ATTRIBUTE_UNUSED
36556 rs6000_stack_protect_fail (void)
36558 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36559 ? default_hidden_stack_protect_fail ()
36560 : default_external_stack_protect_fail ();
36563 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36565 #if TARGET_ELF
36566 static unsigned HOST_WIDE_INT
36567 rs6000_asan_shadow_offset (void)
36569 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36571 #endif
36573 /* Mask options that we want to support inside of attribute((target)) and
36574 #pragma GCC target operations. Note, we do not include things like
36575 64/32-bit, endianness, hard/soft floating point, etc. that would have
36576 different calling sequences. */
36578 struct rs6000_opt_mask {
36579 const char *name; /* option name */
36580 HOST_WIDE_INT mask; /* mask to set */
36581 bool invert; /* invert sense of mask */
36582 bool valid_target; /* option is a target option */
36585 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36587 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36588 { "cmpb", OPTION_MASK_CMPB, false, true },
36589 { "crypto", OPTION_MASK_CRYPTO, false, true },
36590 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36591 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36592 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36593 false, true },
36594 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36595 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36596 { "fprnd", OPTION_MASK_FPRND, false, true },
36597 { "hard-dfp", OPTION_MASK_DFP, false, true },
36598 { "htm", OPTION_MASK_HTM, false, true },
36599 { "isel", OPTION_MASK_ISEL, false, true },
36600 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36601 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36602 { "modulo", OPTION_MASK_MODULO, false, true },
36603 { "mulhw", OPTION_MASK_MULHW, false, true },
36604 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36605 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36606 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36607 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36608 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36609 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36610 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36611 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36612 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36613 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36614 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36615 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36616 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36617 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36618 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36619 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36620 { "string", 0, false, true },
36621 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36622 { "update", OPTION_MASK_NO_UPDATE, true , true },
36623 { "vsx", OPTION_MASK_VSX, false, true },
36624 #ifdef OPTION_MASK_64BIT
36625 #if TARGET_AIX_OS
36626 { "aix64", OPTION_MASK_64BIT, false, false },
36627 { "aix32", OPTION_MASK_64BIT, true, false },
36628 #else
36629 { "64", OPTION_MASK_64BIT, false, false },
36630 { "32", OPTION_MASK_64BIT, true, false },
36631 #endif
36632 #endif
36633 #ifdef OPTION_MASK_EABI
36634 { "eabi", OPTION_MASK_EABI, false, false },
36635 #endif
36636 #ifdef OPTION_MASK_LITTLE_ENDIAN
36637 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36638 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36639 #endif
36640 #ifdef OPTION_MASK_RELOCATABLE
36641 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36642 #endif
36643 #ifdef OPTION_MASK_STRICT_ALIGN
36644 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36645 #endif
36646 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36647 { "string", 0, false, false },
36650 /* Builtin mask mapping for printing the flags. */
36651 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36653 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36654 { "vsx", RS6000_BTM_VSX, false, false },
36655 { "paired", RS6000_BTM_PAIRED, false, false },
36656 { "fre", RS6000_BTM_FRE, false, false },
36657 { "fres", RS6000_BTM_FRES, false, false },
36658 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36659 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36660 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36661 { "cell", RS6000_BTM_CELL, false, false },
36662 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36663 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36664 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36665 { "crypto", RS6000_BTM_CRYPTO, false, false },
36666 { "htm", RS6000_BTM_HTM, false, false },
36667 { "hard-dfp", RS6000_BTM_DFP, false, false },
36668 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36669 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36670 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36671 { "float128", RS6000_BTM_FLOAT128, false, false },
36672 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36675 /* Option variables that we want to support inside attribute((target)) and
36676 #pragma GCC target operations. */
36678 struct rs6000_opt_var {
36679 const char *name; /* option name */
36680 size_t global_offset; /* offset of the option in global_options. */
36681 size_t target_offset; /* offset of the option in target options. */
36684 static struct rs6000_opt_var const rs6000_opt_vars[] =
36686 { "friz",
36687 offsetof (struct gcc_options, x_TARGET_FRIZ),
36688 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36689 { "avoid-indexed-addresses",
36690 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36691 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36692 { "paired",
36693 offsetof (struct gcc_options, x_rs6000_paired_float),
36694 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36695 { "longcall",
36696 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36697 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36698 { "optimize-swaps",
36699 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36700 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36701 { "allow-movmisalign",
36702 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36703 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36704 { "sched-groups",
36705 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36706 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36707 { "always-hint",
36708 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36709 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36710 { "align-branch-targets",
36711 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36712 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36713 { "tls-markers",
36714 offsetof (struct gcc_options, x_tls_markers),
36715 offsetof (struct cl_target_option, x_tls_markers), },
36716 { "sched-prolog",
36717 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36718 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36719 { "sched-epilog",
36720 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36721 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36722 { "speculate-indirect-jumps",
36723 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36724 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
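/* Sketch (assumed example): the entries above are boolean option
   variables rather than ISA mask bits, but they are spelled the same way
   in the attribute string, including the "no-" prefix.  */
#if 0
__attribute__((__target__("longcall,no-sched-epilog")))
void far_away (void);
#endif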
36727 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36728 parsing. Return true if there were no errors. */
36730 static bool
36731 rs6000_inner_target_options (tree args, bool attr_p)
36733 bool ret = true;
36735 if (args == NULL_TREE)
36738 else if (TREE_CODE (args) == STRING_CST)
36740 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36741 char *q;
36743 while ((q = strtok (p, ",")) != NULL)
36745 bool error_p = false;
36746 bool not_valid_p = false;
36747 const char *cpu_opt = NULL;
36749 p = NULL;
36750 if (strncmp (q, "cpu=", 4) == 0)
36752 int cpu_index = rs6000_cpu_name_lookup (q+4);
36753 if (cpu_index >= 0)
36754 rs6000_cpu_index = cpu_index;
36755 else
36757 error_p = true;
36758 cpu_opt = q+4;
36761 else if (strncmp (q, "tune=", 5) == 0)
36763 int tune_index = rs6000_cpu_name_lookup (q+5);
36764 if (tune_index >= 0)
36765 rs6000_tune_index = tune_index;
36766 else
36768 error_p = true;
36769 cpu_opt = q+5;
36772 else
36774 size_t i;
36775 bool invert = false;
36776 char *r = q;
36778 error_p = true;
36779 if (strncmp (r, "no-", 3) == 0)
36781 invert = true;
36782 r += 3;
36785 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36786 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36788 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36790 if (!rs6000_opt_masks[i].valid_target)
36791 not_valid_p = true;
36792 else
36794 error_p = false;
36795 rs6000_isa_flags_explicit |= mask;
36797 /* VSX needs altivec, so -mvsx automagically sets
36798 altivec and disables -mavoid-indexed-addresses. */
36799 if (!invert)
36801 if (mask == OPTION_MASK_VSX)
36803 mask |= OPTION_MASK_ALTIVEC;
36804 TARGET_AVOID_XFORM = 0;
36808 if (rs6000_opt_masks[i].invert)
36809 invert = !invert;
36811 if (invert)
36812 rs6000_isa_flags &= ~mask;
36813 else
36814 rs6000_isa_flags |= mask;
36816 break;
36819 if (error_p && !not_valid_p)
36821 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36822 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36824 size_t j = rs6000_opt_vars[i].global_offset;
36825 *((int *) ((char *)&global_options + j)) = !invert;
36826 error_p = false;
36827 not_valid_p = false;
36828 break;
36833 if (error_p)
36835 const char *eprefix, *esuffix;
36837 ret = false;
36838 if (attr_p)
36840 eprefix = "__attribute__((__target__(";
36841 esuffix = ")))";
36843 else
36845 eprefix = "#pragma GCC target ";
36846 esuffix = "";
36849 if (cpu_opt)
36850 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36851 q, esuffix);
36852 else if (not_valid_p)
36853 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36854 else
36855 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36860 else if (TREE_CODE (args) == TREE_LIST)
36864 tree value = TREE_VALUE (args);
36865 if (value)
36867 bool ret2 = rs6000_inner_target_options (value, attr_p);
36868 if (!ret2)
36869 ret = false;
36871 args = TREE_CHAIN (args);
36873 while (args != NULL_TREE);
36876 else
36878 error ("attribute %<target%> argument not a string");
36879 return false;
36882 return ret;
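/* Detached sketch of the parsing pattern used above, with the GCC
   internals stripped out (the helper is hypothetical, not part of this
   file): strtok over a comma-separated list, special cases for "cpu="
   and "tune=", and a "no-" prefix that inverts the named option.  */
#if 0
#include <string.h>

static void
parse_target_string (char *p)
{
  char *q;
  while ((q = strtok (p, ",")) != NULL)
    {
      int invert = 0;
      p = NULL;                         /* strtok continues the same string */
      if (strncmp (q, "cpu=", 4) == 0 || strncmp (q, "tune=", 5) == 0)
	;                               /* look up the cpu/tune name */
      else
	{
	  if (strncmp (q, "no-", 3) == 0)
	    {
	      invert = 1;
	      q += 3;                   /* strip prefix, flip the sense */
	    }
	  /* search the mask and variable tables for Q, honoring INVERT */
	}
    }
}
#endif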
36885 /* Print out the target options as a list for -mdebug=target. */
36887 static void
36888 rs6000_debug_target_options (tree args, const char *prefix)
36890 if (args == NULL_TREE)
36891 fprintf (stderr, "%s<NULL>", prefix);
36893 else if (TREE_CODE (args) == STRING_CST)
36895 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36896 char *q;
36898 while ((q = strtok (p, ",")) != NULL)
36900 p = NULL;
36901 fprintf (stderr, "%s\"%s\"", prefix, q);
36902 prefix = ", ";
36906 else if (TREE_CODE (args) == TREE_LIST)
36910 tree value = TREE_VALUE (args);
36911 if (value)
36913 rs6000_debug_target_options (value, prefix);
36914 prefix = ", ";
36916 args = TREE_CHAIN (args);
36918 while (args != NULL_TREE);
36921 else
36922 gcc_unreachable ();
36924 return;
36928 /* Hook to validate attribute((target("..."))). */
36930 static bool
36931 rs6000_valid_attribute_p (tree fndecl,
36932 tree ARG_UNUSED (name),
36933 tree args,
36934 int flags)
36936 struct cl_target_option cur_target;
36937 bool ret;
36938 tree old_optimize;
36939 tree new_target, new_optimize;
36940 tree func_optimize;
36942 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36944 if (TARGET_DEBUG_TARGET)
36946 tree tname = DECL_NAME (fndecl);
36947 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36948 if (tname)
36949 fprintf (stderr, "function: %.*s\n",
36950 (int) IDENTIFIER_LENGTH (tname),
36951 IDENTIFIER_POINTER (tname));
36952 else
36953 fprintf (stderr, "function: unknown\n");
36955 fprintf (stderr, "args:");
36956 rs6000_debug_target_options (args, " ");
36957 fprintf (stderr, "\n");
36959 if (flags)
36960 fprintf (stderr, "flags: 0x%x\n", flags);
36962 fprintf (stderr, "--------------------\n");
36965 /* attribute((target("default"))) does nothing, beyond
36966 affecting multi-versioning. */
36967 if (TREE_VALUE (args)
36968 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36969 && TREE_CHAIN (args) == NULL_TREE
36970 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36971 return true;
36973 old_optimize = build_optimization_node (&global_options);
36974 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36976 /* If the function changed the optimization levels as well as setting target
36977 options, start with the optimizations specified. */
36978 if (func_optimize && func_optimize != old_optimize)
36979 cl_optimization_restore (&global_options,
36980 TREE_OPTIMIZATION (func_optimize));
36982 /* The target attributes may also change some optimization flags, so update
36983 the optimization options if necessary. */
36984 cl_target_option_save (&cur_target, &global_options);
36985 rs6000_cpu_index = rs6000_tune_index = -1;
36986 ret = rs6000_inner_target_options (args, true);
36988 /* Set up any additional state. */
36989 if (ret)
36991 ret = rs6000_option_override_internal (false);
36992 new_target = build_target_option_node (&global_options);
36994 else
36995 new_target = NULL;
36997 new_optimize = build_optimization_node (&global_options);
36999 if (!new_target)
37000 ret = false;
37002 else if (fndecl)
37004 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
37006 if (old_optimize != new_optimize)
37007 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
37010 cl_target_option_restore (&global_options, &cur_target);
37012 if (old_optimize != new_optimize)
37013 cl_optimization_restore (&global_options,
37014 TREE_OPTIMIZATION (old_optimize));
37016 return ret;
37020 /* Hook to validate the current #pragma GCC target and set the state, and
37021 update the macros based on what was changed. If ARGS is NULL, then
37022 POP_TARGET is used to reset the options. */
37024 bool
37025 rs6000_pragma_target_parse (tree args, tree pop_target)
37027 tree prev_tree = build_target_option_node (&global_options);
37028 tree cur_tree;
37029 struct cl_target_option *prev_opt, *cur_opt;
37030 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
37031 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
37033 if (TARGET_DEBUG_TARGET)
37035 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
37036 fprintf (stderr, "args:");
37037 rs6000_debug_target_options (args, " ");
37038 fprintf (stderr, "\n");
37040 if (pop_target)
37042 fprintf (stderr, "pop_target:\n");
37043 debug_tree (pop_target);
37045 else
37046 fprintf (stderr, "pop_target: <NULL>\n");
37048 fprintf (stderr, "--------------------\n");
37051 if (! args)
37053 cur_tree = ((pop_target)
37054 ? pop_target
37055 : target_option_default_node);
37056 cl_target_option_restore (&global_options,
37057 TREE_TARGET_OPTION (cur_tree));
37059 else
37061 rs6000_cpu_index = rs6000_tune_index = -1;
37062 if (!rs6000_inner_target_options (args, false)
37063 || !rs6000_option_override_internal (false)
37064 || (cur_tree = build_target_option_node (&global_options))
37065 == NULL_TREE)
37067 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
37068 fprintf (stderr, "invalid pragma\n");
37070 return false;
37074 target_option_current_node = cur_tree;
37075 rs6000_activate_target_options (target_option_current_node);
37077 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
37078 change the macros that are defined. */
37079 if (rs6000_target_modify_macros_ptr)
37081 prev_opt = TREE_TARGET_OPTION (prev_tree);
37082 prev_bumask = prev_opt->x_rs6000_builtin_mask;
37083 prev_flags = prev_opt->x_rs6000_isa_flags;
37085 cur_opt = TREE_TARGET_OPTION (cur_tree);
37086 cur_flags = cur_opt->x_rs6000_isa_flags;
37087 cur_bumask = cur_opt->x_rs6000_builtin_mask;
37089 diff_bumask = (prev_bumask ^ cur_bumask);
37090 diff_flags = (prev_flags ^ cur_flags);
37092 if ((diff_flags != 0) || (diff_bumask != 0))
37094 /* Delete old macros. */
37095 rs6000_target_modify_macros_ptr (false,
37096 prev_flags & diff_flags,
37097 prev_bumask & diff_bumask);
37099 /* Define new macros. */
37100 rs6000_target_modify_macros_ptr (true,
37101 cur_flags & diff_flags,
37102 cur_bumask & diff_bumask);
37106 return true;
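/* The macro update above rests on a small bitmask identity worth spelling
   out (sketch with assumed values): diff = prev ^ cur gives the bits that
   changed; prev & diff were previously on, so their macros are deleted,
   while cur & diff are newly on, so their macros are defined.  */
#if 0
static void
macro_diff_example (void)
{
  unsigned long prev = 0x05, cur = 0x06;
  unsigned long diff = prev ^ cur;      /* 0x03: bits that changed */
  unsigned long dels = prev & diff;     /* 0x01: was on, now off   */
  unsigned long adds = cur & diff;      /* 0x02: was off, now on   */
}
#endif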
37110 /* Remember the last target of rs6000_set_current_function. */
37111 static GTY(()) tree rs6000_previous_fndecl;
37113 /* Restore target's globals from NEW_TREE and invalidate the
37114 rs6000_previous_fndecl cache. */
37116 void
37117 rs6000_activate_target_options (tree new_tree)
37119 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37120 if (TREE_TARGET_GLOBALS (new_tree))
37121 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37122 else if (new_tree == target_option_default_node)
37123 restore_target_globals (&default_target_globals);
37124 else
37125 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37126 rs6000_previous_fndecl = NULL_TREE;
37129 /* Establish appropriate back-end context for processing the function
37130 FNDECL. The argument might be NULL to indicate processing at top
37131 level, outside of any function scope. */
37132 static void
37133 rs6000_set_current_function (tree fndecl)
37135 if (TARGET_DEBUG_TARGET)
37137 fprintf (stderr, "\n==================== rs6000_set_current_function");
37139 if (fndecl)
37140 fprintf (stderr, ", fndecl %s (%p)",
37141 (DECL_NAME (fndecl)
37142 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37143 : "<unknown>"), (void *)fndecl);
37145 if (rs6000_previous_fndecl)
37146 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37148 fprintf (stderr, "\n");
37151 /* Only change the context if the function changes. This hook is called
37152 several times in the course of compiling a function, and we don't want to
37153 slow things down too much or call target_reinit when it isn't safe. */
37154 if (fndecl == rs6000_previous_fndecl)
37155 return;
37157 tree old_tree;
37158 if (rs6000_previous_fndecl == NULL_TREE)
37159 old_tree = target_option_current_node;
37160 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37161 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37162 else
37163 old_tree = target_option_default_node;
37165 tree new_tree;
37166 if (fndecl == NULL_TREE)
37168 if (old_tree != target_option_current_node)
37169 new_tree = target_option_current_node;
37170 else
37171 new_tree = NULL_TREE;
37173 else
37175 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37176 if (new_tree == NULL_TREE)
37177 new_tree = target_option_default_node;
37180 if (TARGET_DEBUG_TARGET)
37182 if (new_tree)
37184 fprintf (stderr, "\nnew fndecl target specific options:\n");
37185 debug_tree (new_tree);
37188 if (old_tree)
37190 fprintf (stderr, "\nold fndecl target specific options:\n");
37191 debug_tree (old_tree);
37194 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37195 fprintf (stderr, "--------------------\n");
37198 if (new_tree && old_tree != new_tree)
37199 rs6000_activate_target_options (new_tree);
37201 if (fndecl)
37202 rs6000_previous_fndecl = fndecl;
37206 /* Save the current options */
37208 static void
37209 rs6000_function_specific_save (struct cl_target_option *ptr,
37210 struct gcc_options *opts)
37212 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37213 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37216 /* Restore the current options */
37218 static void
37219 rs6000_function_specific_restore (struct gcc_options *opts,
37220 struct cl_target_option *ptr)
37223 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37224 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37225 (void) rs6000_option_override_internal (false);
37228 /* Print the current options */
37230 static void
37231 rs6000_function_specific_print (FILE *file, int indent,
37232 struct cl_target_option *ptr)
37234 rs6000_print_isa_options (file, indent, "Isa options set",
37235 ptr->x_rs6000_isa_flags);
37237 rs6000_print_isa_options (file, indent, "Isa options explicit",
37238 ptr->x_rs6000_isa_flags_explicit);
37241 /* Helper function to print the current isa or misc options on a line. */
37243 static void
37244 rs6000_print_options_internal (FILE *file,
37245 int indent,
37246 const char *string,
37247 HOST_WIDE_INT flags,
37248 const char *prefix,
37249 const struct rs6000_opt_mask *opts,
37250 size_t num_elements)
37252 size_t i;
37253 size_t start_column = 0;
37254 size_t cur_column;
37255 size_t max_column = 120;
37256 size_t prefix_len = strlen (prefix);
37257 size_t comma_len = 0;
37258 const char *comma = "";
37260 if (indent)
37261 start_column += fprintf (file, "%*s", indent, "");
37263 if (!flags)
37265 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
37266 return;
37269 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
37271 /* Print the various mask options. */
37272 cur_column = start_column;
37273 for (i = 0; i < num_elements; i++)
37275 bool invert = opts[i].invert;
37276 const char *name = opts[i].name;
37277 const char *no_str = "";
37278 HOST_WIDE_INT mask = opts[i].mask;
37279 size_t len = comma_len + prefix_len + strlen (name);
37281 if (!invert)
37283 if ((flags & mask) == 0)
37285 no_str = "no-";
37286 len += sizeof ("no-") - 1;
37289 flags &= ~mask;
37292 else
37294 if ((flags & mask) != 0)
37296 no_str = "no-";
37297 len += sizeof ("no-") - 1;
37300 flags |= mask;
37303 cur_column += len;
37304 if (cur_column > max_column)
37306 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
37307 cur_column = start_column + len;
37308 comma = "";
37311 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37312 comma = ", ";
37313 comma_len = sizeof (", ") - 1;
37316 fputs ("\n", file);
37319 /* Helper function to print the current isa options on a line. */
37321 static void
37322 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37323 HOST_WIDE_INT flags)
37325 rs6000_print_options_internal (file, indent, string, flags, "-m",
37326 &rs6000_opt_masks[0],
37327 ARRAY_SIZE (rs6000_opt_masks));
37330 static void
37331 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37332 HOST_WIDE_INT flags)
37334 rs6000_print_options_internal (file, indent, string, flags, "",
37335 &rs6000_builtin_mask_names[0],
37336 ARRAY_SIZE (rs6000_builtin_mask_names));
37339 /* If the user used -mno-vsx, we need turn off all of the implicit ISA 2.06,
37340 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37341 -mupper-regs-df, etc.).
37343 If the user used -mno-power8-vector, we need to turn off all of the implicit
37344 ISA 2.07 and 3.0 options that relate to the vector unit.
37346 If the user used -mno-power9-vector, we need to turn off all of the implicit
37347 ISA 3.0 options that relate to the vector unit.
37349 This function does not handle explicit options such as the user specifying
37350 -mdirect-move. These are handled in rs6000_option_override_internal, and
37351 the appropriate error is given if needed.
37353 We return a mask of all of the implicit options that should not be enabled
37354 by default. */
37356 static HOST_WIDE_INT
37357 rs6000_disable_incompatible_switches (void)
37359 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37360 size_t i, j;
37362 static const struct {
37363 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37364 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37365 const char *const name; /* name of the switch. */
37366 } flags[] = {
37367 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37368 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37369 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37372 for (i = 0; i < ARRAY_SIZE (flags); i++)
37374 HOST_WIDE_INT no_flag = flags[i].no_flag;
37376 if ((rs6000_isa_flags & no_flag) == 0
37377 && (rs6000_isa_flags_explicit & no_flag) != 0)
37379 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37380 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37381 & rs6000_isa_flags
37382 & dep_flags);
37384 if (set_flags)
37386 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37387 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37389 set_flags &= ~rs6000_opt_masks[j].mask;
37390 error ("%<-mno-%s%> turns off %<-m%s%>",
37391 flags[i].name,
37392 rs6000_opt_masks[j].name);
37395 gcc_assert (!set_flags);
37398 rs6000_isa_flags &= ~dep_flags;
37399 ignore_masks |= no_flag | dep_flags;
37403 return ignore_masks;
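/* Illustrative command line (assumed example): explicitly enabling a
   dependent option while explicitly disabling the option it depends on
   triggers the error emitted above, e.g.

       gcc -mno-vsx -mpower8-vector ...

   should report that -mno-vsx turns off -mpower8-vector, since the
   power8-vector mask is among the VSX-dependent flags.  */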
37407 /* Helper function for printing the function name when debugging. */
37409 static const char *
37410 get_decl_name (tree fn)
37412 tree name;
37414 if (!fn)
37415 return "<null>";
37417 name = DECL_NAME (fn);
37418 if (!name)
37419 return "<no-name>";
37421 return IDENTIFIER_POINTER (name);
37424 /* Return the clone id of the target we are compiling code for in a target
37425 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37426 the priority list for the target clones (ordered from lowest to
37427 highest). */
37429 static int
37430 rs6000_clone_priority (tree fndecl)
37432 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37433 HOST_WIDE_INT isa_masks;
37434 int ret = CLONE_DEFAULT;
37435 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37436 const char *attrs_str = NULL;
37438 attrs = TREE_VALUE (TREE_VALUE (attrs));
37439 attrs_str = TREE_STRING_POINTER (attrs);
37441 /* Return priority zero for default function. Return the ISA needed for the
37442 function if it is not the default. */
37443 if (strcmp (attrs_str, "default") != 0)
37445 if (fn_opts == NULL_TREE)
37446 fn_opts = target_option_default_node;
37448 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37449 isa_masks = rs6000_isa_flags;
37450 else
37451 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37453 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37454 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37455 break;
37458 if (TARGET_DEBUG_TARGET)
37459 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37460 get_decl_name (fndecl), ret);
37462 return ret;
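/* Usage sketch (assumed example) of the clone machinery this priority
   feeds: each target_clones version gets a priority from the loop above,
   and the generated dispatcher picks the highest-priority clone that the
   runtime CPU supports.  */
#if 0
__attribute__ ((target_clones ("cpu=power9", "default")))
int
hot_loop (int n)
{
  return n * 2;
}
#endif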
37465 /* This compares the priority of target features in function DECL1 and DECL2.
37466 It returns positive value if DECL1 is higher priority, negative value if
37467 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37468 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37470 static int
37471 rs6000_compare_version_priority (tree decl1, tree decl2)
37473 int priority1 = rs6000_clone_priority (decl1);
37474 int priority2 = rs6000_clone_priority (decl2);
37475 int ret = priority1 - priority2;
37477 if (TARGET_DEBUG_TARGET)
37478 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37479 get_decl_name (decl1), get_decl_name (decl2), ret);
37481 return ret;
37484 /* Make a dispatcher declaration for the multi-versioned function DECL.
37485 Calls to DECL function will be replaced with calls to the dispatcher
37486 by the front-end. Returns the decl of the dispatcher function. */
37488 static tree
37489 rs6000_get_function_versions_dispatcher (void *decl)
37491 tree fn = (tree) decl;
37492 struct cgraph_node *node = NULL;
37493 struct cgraph_node *default_node = NULL;
37494 struct cgraph_function_version_info *node_v = NULL;
37495 struct cgraph_function_version_info *first_v = NULL;
37497 tree dispatch_decl = NULL;
37499 struct cgraph_function_version_info *default_version_info = NULL;
37500 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37502 if (TARGET_DEBUG_TARGET)
37503 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37504 get_decl_name (fn));
37506 node = cgraph_node::get (fn);
37507 gcc_assert (node != NULL);
37509 node_v = node->function_version ();
37510 gcc_assert (node_v != NULL);
37512 if (node_v->dispatcher_resolver != NULL)
37513 return node_v->dispatcher_resolver;
37515 /* Find the default version and make it the first node. */
37516 first_v = node_v;
37517 /* Go to the beginning of the chain. */
37518 while (first_v->prev != NULL)
37519 first_v = first_v->prev;
37521 default_version_info = first_v;
37522 while (default_version_info != NULL)
37524 const tree decl2 = default_version_info->this_node->decl;
37525 if (is_function_default_version (decl2))
37526 break;
37527 default_version_info = default_version_info->next;
37530 /* If there is no default node, just return NULL. */
37531 if (default_version_info == NULL)
37532 return NULL;
37534 /* Make default info the first node. */
37535 if (first_v != default_version_info)
37537 default_version_info->prev->next = default_version_info->next;
37538 if (default_version_info->next)
37539 default_version_info->next->prev = default_version_info->prev;
37540 first_v->prev = default_version_info;
37541 default_version_info->next = first_v;
37542 default_version_info->prev = NULL;
37545 default_node = default_version_info->this_node;
37547 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37548 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37549 "target_clones attribute needs GLIBC (2.23 and newer) that "
37550 "exports hardware capability bits");
37551 #else
37553 if (targetm.has_ifunc_p ())
37555 struct cgraph_function_version_info *it_v = NULL;
37556 struct cgraph_node *dispatcher_node = NULL;
37557 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37559 /* Right now, the dispatching is done via ifunc. */
37560 dispatch_decl = make_dispatcher_decl (default_node->decl);
37562 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37563 gcc_assert (dispatcher_node != NULL);
37564 dispatcher_node->dispatcher_function = 1;
37565 dispatcher_version_info
37566 = dispatcher_node->insert_new_function_version ();
37567 dispatcher_version_info->next = default_version_info;
37568 dispatcher_node->definition = 1;
37570 /* Set the dispatcher for all the versions. */
37571 it_v = default_version_info;
37572 while (it_v != NULL)
37574 it_v->dispatcher_resolver = dispatch_decl;
37575 it_v = it_v->next;
37578 else
37580 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37581 "multiversioning needs ifunc which is not supported "
37582 "on this target");
37584 #endif
37586 return dispatch_decl;
37589 /* Make the resolver function decl to dispatch the versions of a multi-
37590 versioned function, DEFAULT_DECL. Create an empty basic block in the
37591 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37592 function. */
37594 static tree
37595 make_resolver_func (const tree default_decl,
37596 const tree dispatch_decl,
37597 basic_block *empty_bb)
37599 /* Make the resolver function static. The resolver function returns
37600 void *. */
37601 tree decl_name = clone_function_name (default_decl, "resolver");
37602 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37603 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37604 tree decl = build_fn_decl (resolver_name, type);
37605 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37607 DECL_NAME (decl) = decl_name;
37608 TREE_USED (decl) = 1;
37609 DECL_ARTIFICIAL (decl) = 1;
37610 DECL_IGNORED_P (decl) = 0;
37611 TREE_PUBLIC (decl) = 0;
37612 DECL_UNINLINABLE (decl) = 1;
37614 /* Resolver is not external, body is generated. */
37615 DECL_EXTERNAL (decl) = 0;
37616 DECL_EXTERNAL (dispatch_decl) = 0;
37618 DECL_CONTEXT (decl) = NULL_TREE;
37619 DECL_INITIAL (decl) = make_node (BLOCK);
37620 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37622 /* Build result decl and add to function_decl. */
37623 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37624 DECL_ARTIFICIAL (t) = 1;
37625 DECL_IGNORED_P (t) = 1;
37626 DECL_RESULT (decl) = t;
37628 gimplify_function_tree (decl);
37629 push_cfun (DECL_STRUCT_FUNCTION (decl));
37630 *empty_bb = init_lowered_empty_function (decl, false,
37631 profile_count::uninitialized ());
37633 cgraph_node::add_new_function (decl, true);
37634 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37636 pop_cfun ();
37638 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37639 DECL_ATTRIBUTES (dispatch_decl)
37640 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37642 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37644 return decl;
37647 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37648 return a pointer to VERSION_DECL if we are running on a machine that
37649 supports the index CLONE_ISA hardware architecture bits. This function will
37650 be called during version dispatch to decide which function version to
37651 execute. It returns the basic block at the end, to which more conditions
37652 can be added. */
37654 static basic_block
37655 add_condition_to_bb (tree function_decl, tree version_decl,
37656 int clone_isa, basic_block new_bb)
37658 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37660 gcc_assert (new_bb != NULL);
37661 gimple_seq gseq = bb_seq (new_bb);
37664 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37665 build_fold_addr_expr (version_decl));
37666 tree result_var = create_tmp_var (ptr_type_node);
37667 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37668 gimple *return_stmt = gimple_build_return (result_var);
37670 if (clone_isa == CLONE_DEFAULT)
37672 gimple_seq_add_stmt (&gseq, convert_stmt);
37673 gimple_seq_add_stmt (&gseq, return_stmt);
37674 set_bb_seq (new_bb, gseq);
37675 gimple_set_bb (convert_stmt, new_bb);
37676 gimple_set_bb (return_stmt, new_bb);
37677 pop_cfun ();
37678 return new_bb;
37681 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37682 tree cond_var = create_tmp_var (bool_int_type_node);
37683 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37684 const char *arg_str = rs6000_clone_map[clone_isa].name;
37685 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37686 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37687 gimple_call_set_lhs (call_cond_stmt, cond_var);
37689 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37690 gimple_set_bb (call_cond_stmt, new_bb);
37691 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37693 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37694 NULL_TREE, NULL_TREE);
37695 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37696 gimple_set_bb (if_else_stmt, new_bb);
37697 gimple_seq_add_stmt (&gseq, if_else_stmt);
37699 gimple_seq_add_stmt (&gseq, convert_stmt);
37700 gimple_seq_add_stmt (&gseq, return_stmt);
37701 set_bb_seq (new_bb, gseq);
37703 basic_block bb1 = new_bb;
37704 edge e12 = split_block (bb1, if_else_stmt);
37705 basic_block bb2 = e12->dest;
37706 e12->flags &= ~EDGE_FALLTHRU;
37707 e12->flags |= EDGE_TRUE_VALUE;
37709 edge e23 = split_block (bb2, return_stmt);
37710 gimple_set_bb (convert_stmt, bb2);
37711 gimple_set_bb (return_stmt, bb2);
37713 basic_block bb3 = e23->dest;
37714 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37716 remove_edge (e23);
37717 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37719 pop_cfun ();
37720 return bb3;
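/* At the C level, the resolver assembled by repeated calls to
   add_condition_to_bb amounts to the following shape (sketch; the clone
   names are assumed, __builtin_cpu_supports is the real builtin used by
   the generated code).  */
#if 0
extern int foo_power9 (int), foo_power8 (int), foo_default (int);

static void *
foo_resolver (void)
{
  if (__builtin_cpu_supports ("arch_3_00"))   /* ISA 3.0 (power9) */
    return (void *) foo_power9;
  if (__builtin_cpu_supports ("arch_2_07"))   /* ISA 2.07 (power8) */
    return (void *) foo_power8;
  return (void *) foo_default;
}
#endif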
37723 /* This function generates the dispatch function for multi-versioned functions.
37724 DISPATCH_DECL is the function which will contain the dispatch logic.
37725 FNDECLS are the function choices for dispatch, and is a tree chain.
37726 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37727 code is generated. */
37729 static int
37730 dispatch_function_versions (tree dispatch_decl,
37731 void *fndecls_p,
37732 basic_block *empty_bb)
37734 int ix;
37735 tree ele;
37736 vec<tree> *fndecls;
37737 tree clones[CLONE_MAX];
37739 if (TARGET_DEBUG_TARGET)
37740 fputs ("dispatch_function_versions, top\n", stderr);
37742 gcc_assert (dispatch_decl != NULL
37743 && fndecls_p != NULL
37744 && empty_bb != NULL);
37746 /* fndecls_p is actually a vector. */
37747 fndecls = static_cast<vec<tree> *> (fndecls_p);
37749 /* At least one more version other than the default. */
37750 gcc_assert (fndecls->length () >= 2);
37752 /* The first version in the vector is the default decl. */
37753 memset ((void *) clones, '\0', sizeof (clones));
37754 clones[CLONE_DEFAULT] = (*fndecls)[0];
37756 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37757 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37758 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37759 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37760 to insert the code here to do the call. */
37762 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37764 int priority = rs6000_clone_priority (ele);
37765 if (!clones[priority])
37766 clones[priority] = ele;
37769 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37770 if (clones[ix])
37772 if (TARGET_DEBUG_TARGET)
37773 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37774 ix, get_decl_name (clones[ix]));
37776 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37777 *empty_bb);
37780 return 0;
37783 /* Generate the dispatching code body to dispatch multi-versioned function
37784 DECL. The target hook is called to process the "target" attributes and
37785 provide the code to dispatch the right function at run-time. NODE points
37786 to the dispatcher decl whose body will be created. */
37788 static tree
37789 rs6000_generate_version_dispatcher_body (void *node_p)
37791 tree resolver;
37792 basic_block empty_bb;
37793 struct cgraph_node *node = (cgraph_node *) node_p;
37794 struct cgraph_function_version_info *ninfo = node->function_version ();
37796 if (ninfo->dispatcher_resolver)
37797 return ninfo->dispatcher_resolver;
37799 /* node is going to be an alias, so remove the finalized bit. */
37800 node->definition = false;
37802 /* The first version in the chain corresponds to the default version. */
37803 ninfo->dispatcher_resolver = resolver
37804 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37806 if (TARGET_DEBUG_TARGET)
37807 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37808 get_decl_name (resolver));
37810 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37811 auto_vec<tree, 2> fn_ver_vec;
37813 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37814 vinfo;
37815 vinfo = vinfo->next)
37817 struct cgraph_node *version = vinfo->this_node;
37818 /* Check for virtual functions here again, as by this time it should
37819 have been determined if this function needs a vtable index or
37820 not. This happens for methods in derived classes that override
37821 virtual methods in base classes but are not explicitly marked as
37822 virtual. */
37823 if (DECL_VINDEX (version->decl))
37824 sorry ("Virtual function multiversioning not supported");
37826 fn_ver_vec.safe_push (version->decl);
37829 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37830 cgraph_edge::rebuild_edges ();
37831 pop_cfun ();
37832 return resolver;
37836 /* Hook to determine if one function can safely inline another. */
37838 static bool
37839 rs6000_can_inline_p (tree caller, tree callee)
37841 bool ret = false;
37842 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37843 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37845 /* If callee has no option attributes, then it is ok to inline. */
37846 if (!callee_tree)
37847 ret = true;
37849 /* If caller has no option attributes, but callee does then it is not ok to
37850 inline. */
37851 else if (!caller_tree)
37852 ret = false;
37854 else
37856 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37857 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37859 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37860 can inline an altivec function but a non-vsx function can't inline a
37861 vsx function. */
37862 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37863 == callee_opts->x_rs6000_isa_flags)
37864 ret = true;
37867 if (TARGET_DEBUG_TARGET)
37868 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37869 get_decl_name (caller), get_decl_name (callee),
37870 (ret ? "can" : "cannot"));
37872 return ret;
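/* The inlining test above is a plain subset check on the ISA flag masks
   (sketch): the callee can be inlined only if every ISA bit it requires
   is also enabled in the caller.  */
#if 0
static int
isa_subset_p (unsigned long long caller_flags, unsigned long long callee_flags)
{
  return (caller_flags & callee_flags) == callee_flags;
}
#endif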
37875 /* Allocate a stack temp and fixup the address so it meets the particular
37876 memory requirements (either offsettable or REG+REG addressing). */
37878 rtx
37879 rs6000_allocate_stack_temp (machine_mode mode,
37880 bool offsettable_p,
37881 bool reg_reg_p)
37883 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37884 rtx addr = XEXP (stack, 0);
37885 int strict_p = reload_completed;
37887 if (!legitimate_indirect_address_p (addr, strict_p))
37889 if (offsettable_p
37890 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37891 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37893 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37894 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37897 return stack;
37900 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37901 to such a form to deal with memory reference instructions like STFIWX that
37902 only take reg+reg addressing. */
37904 rtx
37905 rs6000_address_for_fpconvert (rtx x)
37907 rtx addr;
37909 gcc_assert (MEM_P (x));
37910 addr = XEXP (x, 0);
37911 if (can_create_pseudo_p ()
37912 && ! legitimate_indirect_address_p (addr, reload_completed)
37913 && ! legitimate_indexed_address_p (addr, reload_completed))
37915 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37917 rtx reg = XEXP (addr, 0);
37918 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37919 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37920 gcc_assert (REG_P (reg));
37921 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37922 addr = reg;
37924 else if (GET_CODE (addr) == PRE_MODIFY)
37926 rtx reg = XEXP (addr, 0);
37927 rtx expr = XEXP (addr, 1);
37928 gcc_assert (REG_P (reg));
37929 gcc_assert (GET_CODE (expr) == PLUS);
37930 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37931 addr = reg;
37934 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37937 return x;
37940 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37942 On the RS/6000, all integer constants are acceptable, most won't be valid
37943 for particular insns, though. Only easy FP constants are acceptable. */
37945 static bool
37946 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37948 if (TARGET_ELF && tls_referenced_p (x))
37949 return false;
37951 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37952 || GET_MODE (x) == VOIDmode
37953 || (TARGET_POWERPC64 && mode == DImode)
37954 || easy_fp_constant (x, mode)
37955 || easy_vector_constant (x, mode));
37959 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37961 static bool
37962 chain_already_loaded (rtx_insn *last)
37964 for (; last != NULL; last = PREV_INSN (last))
37966 if (NONJUMP_INSN_P (last))
37968 rtx patt = PATTERN (last);
37970 if (GET_CODE (patt) == SET)
37972 rtx lhs = XEXP (patt, 0);
37974 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37975 return true;
37979 return false;
37982 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37984 void
37985 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37987 const bool direct_call_p
37988 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37989 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37990 rtx toc_load = NULL_RTX;
37991 rtx toc_restore = NULL_RTX;
37992 rtx func_addr;
37993 rtx abi_reg = NULL_RTX;
37994 rtx call[4];
37995 int n_call;
37996 rtx insn;
37998 /* Handle longcall attributes. */
37999 if (INTVAL (cookie) & CALL_LONG)
38000 func_desc = rs6000_longcall_ref (func_desc);
38002 /* Handle indirect calls. */
38003 if (GET_CODE (func_desc) != SYMBOL_REF
38004 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
38006 /* Save the TOC into its reserved slot before the call,
38007 and prepare to restore it after the call. */
38008 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
38009 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
38010 rtx stack_toc_mem = gen_frame_mem (Pmode,
38011 gen_rtx_PLUS (Pmode, stack_ptr,
38012 stack_toc_offset));
38013 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
38014 gen_rtvec (1, stack_toc_offset),
38015 UNSPEC_TOCSLOT);
38016 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
38018 /* Can we optimize saving the TOC in the prologue or
38019 do we need to do it at every call? */
38020 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
38021 cfun->machine->save_toc_in_prologue = true;
38022 else
38024 MEM_VOLATILE_P (stack_toc_mem) = 1;
38025 emit_move_insn (stack_toc_mem, toc_reg);
38028 if (DEFAULT_ABI == ABI_ELFv2)
38030 /* A function pointer in the ELFv2 ABI is just a plain address, but
38031 the ABI requires it to be loaded into r12 before the call. */
38032 func_addr = gen_rtx_REG (Pmode, 12);
38033 emit_move_insn (func_addr, func_desc);
38034 abi_reg = func_addr;
38036 else
38038 /* A function pointer under AIX is a pointer to a data area whose
38039 first word contains the actual address of the function, whose
38040 second word contains a pointer to its TOC, and whose third word
38041 contains a value to place in the static chain register (r11).
38042 Note that if we load the static chain, our "trampoline" need
38043 not have any executable code. */
38045 /* Load up address of the actual function. */
38046 func_desc = force_reg (Pmode, func_desc);
38047 func_addr = gen_reg_rtx (Pmode);
38048 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
38050 /* Prepare to load the TOC of the called function. Note that the
38051 TOC load must happen immediately before the actual call so
38052 that unwinding the TOC registers works correctly. See the
38053 comment in frob_update_context. */
38054 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
38055 rtx func_toc_mem = gen_rtx_MEM (Pmode,
38056 gen_rtx_PLUS (Pmode, func_desc,
38057 func_toc_offset));
38058 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
38060 /* If we have a static chain, load it up. But, if the call was
38061 originally direct, the 3rd word has not been written since no
38062 trampoline has been built, so we ought not to load it, lest we
38063 override a static chain value. */
38064 if (!direct_call_p
38065 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
38066 && !chain_already_loaded (get_current_sequence ()->next->last))
38068 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
38069 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
38070 rtx func_sc_mem = gen_rtx_MEM (Pmode,
38071 gen_rtx_PLUS (Pmode, func_desc,
38072 func_sc_offset));
38073 emit_move_insn (sc_reg, func_sc_mem);
38074 abi_reg = sc_reg;
38078 else
38080 /* Direct calls use the TOC: for local calls, the callee will
38081 assume the TOC register is set; for non-local calls, the
38082 PLT stub needs the TOC register. */
38083 abi_reg = toc_reg;
38084 func_addr = func_desc;
38087 /* Create the call. */
38088 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
38089 if (value != NULL_RTX)
38090 call[0] = gen_rtx_SET (value, call[0]);
38091 n_call = 1;
38093 if (toc_load)
38094 call[n_call++] = toc_load;
38095 if (toc_restore)
38096 call[n_call++] = toc_restore;
38098 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38100 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38101 insn = emit_call_insn (insn);
38103 /* Mention all registers defined by the ABI to hold information
38104 as uses in CALL_INSN_FUNCTION_USAGE. */
38105 if (abi_reg)
38106 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
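/* Sketch of the AIX function-descriptor layout the indirect-call path
   above walks (struct and field names are illustrative, not from this
   file): word 0 holds the code address, word 1 the callee's TOC pointer,
   and word 2 the static chain value destined for r11.  */
#if 0
struct aix_func_desc
{
  void *code_addr;      /* loaded as the actual call target    */
  void *toc;            /* loaded into r2 just before the call */
  void *static_chain;   /* loaded into r11 unless already set  */
};
#endif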
38109 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38111 void
38112 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38114 rtx call[2];
38115 rtx insn;
38117 gcc_assert (INTVAL (cookie) == 0);
38119 /* Create the call. */
38120 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
38121 if (value != NULL_RTX)
38122 call[0] = gen_rtx_SET (value, call[0]);
38124 call[1] = simple_return_rtx;
38126 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38127 insn = emit_call_insn (insn);
38129 /* Note use of the TOC register. */
38130 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38133 /* Return whether we need to always update the saved TOC pointer when we update
38134 the stack pointer. */
38136 static bool
38137 rs6000_save_toc_in_prologue_p (void)
38139 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38142 #ifdef HAVE_GAS_HIDDEN
38143 # define USE_HIDDEN_LINKONCE 1
38144 #else
38145 # define USE_HIDDEN_LINKONCE 0
38146 #endif
38148 /* Fills in the label name that should be used for a 476 link stack thunk. */
38150 void
38151 get_ppc476_thunk_name (char name[32])
38153 gcc_assert (TARGET_LINK_STACK);
38155 if (USE_HIDDEN_LINKONCE)
38156 sprintf (name, "__ppc476.get_thunk");
38157 else
38158 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38161 /* This function emits the simple thunk routine that is used to preserve
38162 the link stack on the 476 cpu. */
38164 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38165 static void
38166 rs6000_code_end (void)
38168 char name[32];
38169 tree decl;
38171 if (!TARGET_LINK_STACK)
38172 return;
38174 get_ppc476_thunk_name (name);
38176 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38177 build_function_type_list (void_type_node, NULL_TREE));
38178 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38179 NULL_TREE, void_type_node);
38180 TREE_PUBLIC (decl) = 1;
38181 TREE_STATIC (decl) = 1;
38183 #if RS6000_WEAK
38184 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38186 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38187 targetm.asm_out.unique_section (decl, 0);
38188 switch_to_section (get_named_section (decl, NULL, 0));
38189 DECL_WEAK (decl) = 1;
38190 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38191 targetm.asm_out.globalize_label (asm_out_file, name);
38192 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38193 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38195 else
38196 #endif
38198 switch_to_section (text_section);
38199 ASM_OUTPUT_LABEL (asm_out_file, name);
38202 DECL_INITIAL (decl) = make_node (BLOCK);
38203 current_function_decl = decl;
38204 allocate_struct_function (decl, false);
38205 init_function_start (decl);
38206 first_function_block_is_cold = false;
38207 /* Make sure unwind info is emitted for the thunk if needed. */
38208 final_start_function (emit_barrier (), asm_out_file, 1);
38210 fputs ("\tblr\n", asm_out_file);
38212 final_end_function ();
38213 init_insn_lengths ();
38214 free_after_compilation (cfun);
38215 set_cfun (NULL);
38216 current_function_decl = NULL;
38219 /* Add r30 to hard reg set if the prologue sets it up and it is not
38220 pic_offset_table_rtx. */
38222 static void
38223 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38225 if (!TARGET_SINGLE_PIC_BASE
38226 && TARGET_TOC
38227 && TARGET_MINIMAL_TOC
38228 && !constant_pool_empty_p ())
38229 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38230 if (cfun->machine->split_stack_argp_used)
38231 add_to_hard_reg_set (&set->set, Pmode, 12);
38233 /* Make sure the hard reg set doesn't include r2, which was possibly added
38234 via PIC_OFFSET_TABLE_REGNUM. */
38235 if (TARGET_TOC)
38236 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38240 /* Helper function for rs6000_split_logical to emit a logical instruction after
38241 splitting the operation into single GPR registers.
38243 DEST is the destination register.
38244 OP1 and OP2 are the input source registers.
38245 CODE is the base operation (AND, IOR, XOR, NOT).
38246 MODE is the machine mode.
38247 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38248 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38249 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38251 static void
38252 rs6000_split_logical_inner (rtx dest,
38253 rtx op1,
38254 rtx op2,
38255 enum rtx_code code,
38256 machine_mode mode,
38257 bool complement_final_p,
38258 bool complement_op1_p,
38259 bool complement_op2_p)
38261 rtx bool_rtx;
38263 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38264 if (op2 && GET_CODE (op2) == CONST_INT
38265 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38266 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38268 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38269 HOST_WIDE_INT value = INTVAL (op2) & mask;
38271 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38272 if (code == AND)
38274 if (value == 0)
38276 emit_insn (gen_rtx_SET (dest, const0_rtx));
38277 return;
38280 else if (value == mask)
38282 if (!rtx_equal_p (dest, op1))
38283 emit_insn (gen_rtx_SET (dest, op1));
38284 return;
38288 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38289 into separate ORI/ORIS or XORI/XORIS instructions. */
38290 else if (code == IOR || code == XOR)
38292 if (value == 0)
38294 if (!rtx_equal_p (dest, op1))
38295 emit_insn (gen_rtx_SET (dest, op1));
38296 return;
38301 if (code == AND && mode == SImode
38302 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38304 emit_insn (gen_andsi3 (dest, op1, op2));
38305 return;
38308 if (complement_op1_p)
38309 op1 = gen_rtx_NOT (mode, op1);
38311 if (complement_op2_p)
38312 op2 = gen_rtx_NOT (mode, op2);
38314 /* For canonical RTL, if only one arm is inverted it is the first. */
38315 if (!complement_op1_p && complement_op2_p)
38316 std::swap (op1, op2);
38318 bool_rtx = ((code == NOT)
38319 ? gen_rtx_NOT (mode, op1)
38320 : gen_rtx_fmt_ee (code, mode, op1, op2));
38322 if (complement_final_p)
38323 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38325 emit_insn (gen_rtx_SET (dest, bool_rtx));
38328 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38329 operations are split immediately during RTL generation to allow for more
38330 optimizations of the AND/IOR/XOR.
38332 OPERANDS is an array containing the destination and two input operands.
38333 CODE is the base operation (AND, IOR, XOR, NOT).
38334 MODE is the machine mode.
38335 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38336 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38337 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
38338 CLOBBER_REG is either NULL or a scratch register of type CC to allow
38339 formation of the AND instructions. */
38341 static void
38342 rs6000_split_logical_di (rtx operands[3],
38343 enum rtx_code code,
38344 bool complement_final_p,
38345 bool complement_op1_p,
38346 bool complement_op2_p)
38348 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38349 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38350 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38351 enum hi_lo { hi = 0, lo = 1 };
38352 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38353 size_t i;
38355 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38356 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38357 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38358 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38360 if (code == NOT)
38361 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38362 else
38364 if (GET_CODE (operands[2]) != CONST_INT)
38366 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38367 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38369 else
38371 HOST_WIDE_INT value = INTVAL (operands[2]);
38372 HOST_WIDE_INT value_hi_lo[2];
38374 gcc_assert (!complement_final_p);
38375 gcc_assert (!complement_op1_p);
38376 gcc_assert (!complement_op2_p);
38378 value_hi_lo[hi] = value >> 32;
38379 value_hi_lo[lo] = value & lower_32bits;
38381 for (i = 0; i < 2; i++)
38383 HOST_WIDE_INT sub_value = value_hi_lo[i];
38385 if (sub_value & sign_bit)
38386 sub_value |= upper_32bits;
38388 op2_hi_lo[i] = GEN_INT (sub_value);
38390 /* If this is an AND instruction, check to see if we need to load
38391 the value in a register. */
38392 if (code == AND && sub_value != -1 && sub_value != 0
38393 && !and_operand (op2_hi_lo[i], SImode))
38394 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38399 for (i = 0; i < 2; i++)
38401 /* Split large IOR/XOR operations. */
38402 if ((code == IOR || code == XOR)
38403 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38404 && !complement_final_p
38405 && !complement_op1_p
38406 && !complement_op2_p
38407 && !logical_const_operand (op2_hi_lo[i], SImode))
38409 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38410 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38411 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38412 rtx tmp = gen_reg_rtx (SImode);
38414 /* Make sure the constant is sign extended. */
38415 if ((hi_16bits & sign_bit) != 0)
38416 hi_16bits |= upper_32bits;
38418 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38419 code, SImode, false, false, false);
38421 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38422 code, SImode, false, false, false);
38424 else
38425 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38426 code, SImode, complement_final_p,
38427 complement_op1_p, complement_op2_p);
38430 return;
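/* As an illustration, on a 32-bit target the low word of
   (ior:DI (reg:DI) (const_int 0x12345678)) is not a logical_const_operand,
   so the loop above splits it into tmp = op1 | 0x12340000 followed by
   dest = tmp | 0x5678, which map to the oris and ori instructions.  */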
38433 /* Split the insns that make up boolean operations operating on multiple GPR
38434 registers. The boolean MD patterns ensure that the inputs either are
38435 exactly the same as the output registers, or there is no overlap.
38437 OPERANDS is an array containing the destination and two input operands.
38438 CODE is the base operation (AND, IOR, XOR, NOT).
38439 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38440 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38441 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38443 void
38444 rs6000_split_logical (rtx operands[3],
38445 enum rtx_code code,
38446 bool complement_final_p,
38447 bool complement_op1_p,
38448 bool complement_op2_p)
38450 machine_mode mode = GET_MODE (operands[0]);
38451 machine_mode sub_mode;
38452 rtx op0, op1, op2;
38453 int sub_size, regno0, regno1, nregs, i;
38455 /* If this is DImode, use the specialized version that can run before
38456 register allocation. */
38457 if (mode == DImode && !TARGET_POWERPC64)
38459 rs6000_split_logical_di (operands, code, complement_final_p,
38460 complement_op1_p, complement_op2_p);
38461 return;
38464 op0 = operands[0];
38465 op1 = operands[1];
38466 op2 = (code == NOT) ? NULL_RTX : operands[2];
38467 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38468 sub_size = GET_MODE_SIZE (sub_mode);
38469 regno0 = REGNO (op0);
38470 regno1 = REGNO (op1);
38472 gcc_assert (reload_completed);
38473 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38474 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38476 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38477 gcc_assert (nregs > 1);
38479 if (op2 && REG_P (op2))
38480 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38482 for (i = 0; i < nregs; i++)
38484 int offset = i * sub_size;
38485 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38486 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38487 rtx sub_op2 = ((code == NOT)
38488 ? NULL_RTX
38489 : simplify_subreg (sub_mode, op2, mode, offset));
38491 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38492 complement_final_p, complement_op1_p,
38493 complement_op2_p);
38496 return;
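/* As an illustration, after reload a 128-bit XOR held in two 64-bit GPRs,
   say r4:r5 ^= r6:r7, is split by the loop above into two DImode
   operations, xor r4,r4,r6 and xor r5,r5,r7.  */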
38500 /* Return true if the peephole2 can combine an addis instruction with a load
38501 that uses an offset, so that the pair can be fused together on
38502 a power8.  */
38504 bool
38505 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38506 rtx addis_value, /* addis value. */
38507 rtx target, /* target register that is loaded. */
38508 rtx mem) /* bottom part of the memory addr. */
38510 rtx addr;
38511 rtx base_reg;
38513 /* Validate arguments. */
38514 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38515 return false;
38517 if (!base_reg_operand (target, GET_MODE (target)))
38518 return false;
38520 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38521 return false;
38523 /* Allow sign/zero extension. */
38524 if (GET_CODE (mem) == ZERO_EXTEND
38525 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38526 mem = XEXP (mem, 0);
38528 if (!MEM_P (mem))
38529 return false;
38531 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38532 return false;
38534 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38535 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38536 return false;
38538 /* Validate that the register used to load the high value is either the
38539 register being loaded, or we can safely replace its use.
38541 This function is only called from the peephole2 pass and we assume that
38542 there are 2 instructions in the peephole (addis and load), so we check
38543 that the target register is not used in the memory address and that the
38544 register holding the addis result is dead after the peephole.  */
38545 if (REGNO (addis_reg) != REGNO (target))
38547 if (reg_mentioned_p (target, mem))
38548 return false;
38550 if (!peep2_reg_dead_p (2, addis_reg))
38551 return false;
38553 /* If the target register being loaded is the stack pointer, we must
38554 avoid loading any other value into it, even temporarily. */
38555 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38556 return false;
38559 base_reg = XEXP (addr, 0);
38560 return REGNO (addis_reg) == REGNO (base_reg);
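/* As an illustration, this lets the peephole2 pass recognize a pair such as:
        addis r10,r2,sym@toc@ha
        lwz r10,sym@toc@l(r10)
   where the addis result feeds a load into the same register, so the two
   instructions can be fused on power8.  */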
38563 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38564 sequence. We adjust the addis register to use the target register. If the
38565 load sign extends, we adjust the code to do a zero-extending load followed
38566 by an explicit sign extension, since the fusion only covers zero-extending
38567 loads.
38569 The operands are:
38570 operands[0] register set with addis (to be replaced with target)
38571 operands[1] value set via addis
38572 operands[2] target register being loaded
38573 operands[3] D-form memory reference using operands[0]. */
38575 void
38576 expand_fusion_gpr_load (rtx *operands)
38578 rtx addis_value = operands[1];
38579 rtx target = operands[2];
38580 rtx orig_mem = operands[3];
38581 rtx new_addr, new_mem, orig_addr, offset;
38582 enum rtx_code plus_or_lo_sum;
38583 machine_mode target_mode = GET_MODE (target);
38584 machine_mode extend_mode = target_mode;
38585 machine_mode ptr_mode = Pmode;
38586 enum rtx_code extend = UNKNOWN;
38588 if (GET_CODE (orig_mem) == ZERO_EXTEND
38589 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38591 extend = GET_CODE (orig_mem);
38592 orig_mem = XEXP (orig_mem, 0);
38593 target_mode = GET_MODE (orig_mem);
38596 gcc_assert (MEM_P (orig_mem));
38598 orig_addr = XEXP (orig_mem, 0);
38599 plus_or_lo_sum = GET_CODE (orig_addr);
38600 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38602 offset = XEXP (orig_addr, 1);
38603 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38604 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38606 if (extend != UNKNOWN)
38607 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38609 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38610 UNSPEC_FUSION_GPR);
38611 emit_insn (gen_rtx_SET (target, new_mem));
38613 if (extend == SIGN_EXTEND)
38615 int sub_off = ((BYTES_BIG_ENDIAN)
38616 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38617 : 0);
38618 rtx sign_reg
38619 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38621 emit_insn (gen_rtx_SET (target,
38622 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38625 return;
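/* As an illustration, a sign-extending HImode load that is fused is
   rewritten as a zero-extending lhz wrapped in the fusion UNSPEC, followed
   by a separate sign_extend insn (an extsh), since power8 fusion only
   covers the zero-extending loads.  */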
38628 /* Emit the addis instruction that will be part of a fused instruction
38629 sequence. */
38631 void
38632 emit_fusion_addis (rtx target, rtx addis_value)
38634 rtx fuse_ops[10];
38635 const char *addis_str = NULL;
38637 /* Emit the addis instruction. */
38638 fuse_ops[0] = target;
38639 if (satisfies_constraint_L (addis_value))
38641 fuse_ops[1] = addis_value;
38642 addis_str = "lis %0,%v1";
38645 else if (GET_CODE (addis_value) == PLUS)
38647 rtx op0 = XEXP (addis_value, 0);
38648 rtx op1 = XEXP (addis_value, 1);
38650 if (REG_P (op0) && CONST_INT_P (op1)
38651 && satisfies_constraint_L (op1))
38653 fuse_ops[1] = op0;
38654 fuse_ops[2] = op1;
38655 addis_str = "addis %0,%1,%v2";
38659 else if (GET_CODE (addis_value) == HIGH)
38661 rtx value = XEXP (addis_value, 0);
38662 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38664 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38665 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38666 if (TARGET_ELF)
38667 addis_str = "addis %0,%2,%1@toc@ha";
38669 else if (TARGET_XCOFF)
38670 addis_str = "addis %0,%1@u(%2)";
38672 else
38673 gcc_unreachable ();
38676 else if (GET_CODE (value) == PLUS)
38678 rtx op0 = XEXP (value, 0);
38679 rtx op1 = XEXP (value, 1);
38681 if (GET_CODE (op0) == UNSPEC
38682 && XINT (op0, 1) == UNSPEC_TOCREL
38683 && CONST_INT_P (op1))
38685 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38686 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38687 fuse_ops[3] = op1;
38688 if (TARGET_ELF)
38689 addis_str = "addis %0,%2,%1+%3@toc@ha";
38691 else if (TARGET_XCOFF)
38692 addis_str = "addis %0,%1+%3@u(%2)";
38694 else
38695 gcc_unreachable ();
38699 else if (satisfies_constraint_L (value))
38701 fuse_ops[1] = value;
38702 addis_str = "lis %0,%v1";
38705 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38707 fuse_ops[1] = value;
38708 addis_str = "lis %0,%1@ha";
38712 if (!addis_str)
38713 fatal_insn ("Could not generate addis value for fusion", addis_value);
38715 output_asm_insn (addis_str, fuse_ops);
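/* As an illustration, the templates above produce output such as
   "lis r10,0x1234" for a simple constant or "addis r10,r2,sym@toc@ha" for
   the ELF TOC-relative form.  */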
38718 /* Emit a D-form load or store instruction that is the second instruction
38719 of a fusion sequence. */
38721 void
38722 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38723 const char *insn_str)
38725 rtx fuse_ops[10];
38726 char insn_template[80];
38728 fuse_ops[0] = load_store_reg;
38729 fuse_ops[1] = addis_reg;
38731 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38733 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38734 fuse_ops[2] = offset;
38735 output_asm_insn (insn_template, fuse_ops);
38738 else if (GET_CODE (offset) == UNSPEC
38739 && XINT (offset, 1) == UNSPEC_TOCREL)
38741 if (TARGET_ELF)
38742 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38744 else if (TARGET_XCOFF)
38745 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38747 else
38748 gcc_unreachable ();
38750 fuse_ops[2] = XVECEXP (offset, 0, 0);
38751 output_asm_insn (insn_template, fuse_ops);
38754 else if (GET_CODE (offset) == PLUS
38755 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38756 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38757 && CONST_INT_P (XEXP (offset, 1)))
38759 rtx tocrel_unspec = XEXP (offset, 0);
38760 if (TARGET_ELF)
38761 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38763 else if (TARGET_XCOFF)
38764 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38766 else
38767 gcc_unreachable ();
38769 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38770 fuse_ops[3] = XEXP (offset, 1);
38771 output_asm_insn (insn_template, fuse_ops);
38774 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38776 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38778 fuse_ops[2] = offset;
38779 output_asm_insn (insn_template, fuse_ops);
38782 else
38783 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38785 return;
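/* As an illustration, with INSN_STR "lwz" and a TOC-relative offset on ELF,
   the template above becomes "lwz %0,%2@toc@l(%1)" and prints as something
   like lwz r10,sym@toc@l(r10).  */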
38788 /* Wrap a TOC address that can be fused to indicate that special fusion
38789 processing is needed. */
38791 rtx
38792 fusion_wrap_memory_address (rtx old_mem)
38794 rtx old_addr = XEXP (old_mem, 0);
38795 rtvec v = gen_rtvec (1, old_addr);
38796 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38797 return replace_equiv_address_nv (old_mem, new_addr, false);
38800 /* Given an address, convert it into the addis and load offset parts. Addresses
38801 created during the peephole2 process look like:
38802 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38803 (unspec [(...)] UNSPEC_TOCREL))
38805 Addresses created via toc fusion look like:
38806 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)  */
38808 static void
38809 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38811 rtx hi, lo;
38813 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38815 lo = XVECEXP (addr, 0, 0);
38816 hi = gen_rtx_HIGH (Pmode, lo);
38818 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38820 hi = XEXP (addr, 0);
38821 lo = XEXP (addr, 1);
38823 else
38824 gcc_unreachable ();
38826 *p_hi = hi;
38827 *p_lo = lo;
38830 /* Return a string to fuse an addis instruction with a gpr load into the same
38831 register that the addis instruction set.  The address that is used
38832 is the logical address that was formed during peephole2:
38833 (lo_sum (high) (low-part))
38835 Or the address is the TOC address that is wrapped before register allocation:
38836 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38838 The code is complicated, so we call output_asm_insn directly, and just
38839 return "". */
38841 const char *
38842 emit_fusion_gpr_load (rtx target, rtx mem)
38844 rtx addis_value;
38845 rtx addr;
38846 rtx load_offset;
38847 const char *load_str = NULL;
38848 machine_mode mode;
38850 if (GET_CODE (mem) == ZERO_EXTEND)
38851 mem = XEXP (mem, 0);
38853 gcc_assert (REG_P (target) && MEM_P (mem));
38855 addr = XEXP (mem, 0);
38856 fusion_split_address (addr, &addis_value, &load_offset);
38858 /* Now emit the load instruction to the same register. */
38859 mode = GET_MODE (mem);
38860 switch (mode)
38862 case E_QImode:
38863 load_str = "lbz";
38864 break;
38866 case E_HImode:
38867 load_str = "lhz";
38868 break;
38870 case E_SImode:
38871 case E_SFmode:
38872 load_str = "lwz";
38873 break;
38875 case E_DImode:
38876 case E_DFmode:
38877 gcc_assert (TARGET_POWERPC64);
38878 load_str = "ld";
38879 break;
38881 default:
38882 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38885 /* Emit the addis instruction. */
38886 emit_fusion_addis (target, addis_value);
38888 /* Emit the D-form load instruction. */
38889 emit_fusion_load_store (target, target, load_offset, load_str);
38891 return "";
38895 /* Return true if the peephole2 can combine an addis instruction with a
38896 load or store so that the pair can be fused together.  This form of
38897 fusion was added in ISA 3.0 (power9) hardware.  */
38899 bool
38900 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38901 rtx addis_value, /* addis value. */
38902 rtx dest, /* destination (memory or register). */
38903 rtx src) /* source (register or memory). */
38905 rtx addr, mem, offset;
38906 machine_mode mode = GET_MODE (src);
38908 /* Validate arguments. */
38909 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38910 return false;
38912 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38913 return false;
38915 /* Ignore extend operations that are part of the load. */
38916 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38917 src = XEXP (src, 0);
38919 /* Test for memory<-register or register<-memory. */
38920 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38922 if (!MEM_P (dest))
38923 return false;
38925 mem = dest;
38928 else if (MEM_P (src))
38930 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38931 return false;
38933 mem = src;
38936 else
38937 return false;
38939 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38940 if (GET_CODE (addr) == PLUS)
38942 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38943 return false;
38945 return satisfies_constraint_I (XEXP (addr, 1));
38948 else if (GET_CODE (addr) == LO_SUM)
38950 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38951 return false;
38953 offset = XEXP (addr, 1);
38954 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38955 return small_toc_ref (offset, GET_MODE (offset));
38957 else if (TARGET_ELF && !TARGET_POWERPC64)
38958 return CONSTANT_P (offset);
38961 return false;
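/* As an illustration, power9 fusion accepts a pair such as:
        addis r11,r2,var@toc@ha
        lfd f1,var@toc@l(r11)
   where, unlike the power8 form, the destination does not have to be the
   same register as the addis base.  */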
38964 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38965 load sequence.
38967 The operands are:
38968 operands[0] register set with addis
38969 operands[1] value set via addis
38970 operands[2] target register being loaded
38971 operands[3] D-form memory reference using operands[0].
38973 This is similar to the fusion introduced with power8, except it scales to
38974 both loads/stores and does not require the result register to be the same as
38975 the base register.  At the moment, we only do this if the register set with
38976 addis is dead.  */
38978 void
38979 expand_fusion_p9_load (rtx *operands)
38981 rtx tmp_reg = operands[0];
38982 rtx addis_value = operands[1];
38983 rtx target = operands[2];
38984 rtx orig_mem = operands[3];
38985 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38986 enum rtx_code plus_or_lo_sum;
38987 machine_mode target_mode = GET_MODE (target);
38988 machine_mode extend_mode = target_mode;
38989 machine_mode ptr_mode = Pmode;
38990 enum rtx_code extend = UNKNOWN;
38992 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38994 extend = GET_CODE (orig_mem);
38995 orig_mem = XEXP (orig_mem, 0);
38996 target_mode = GET_MODE (orig_mem);
38999 gcc_assert (MEM_P (orig_mem));
39001 orig_addr = XEXP (orig_mem, 0);
39002 plus_or_lo_sum = GET_CODE (orig_addr);
39003 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39005 offset = XEXP (orig_addr, 1);
39006 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39007 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39009 if (extend != UNKNOWN)
39010 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
39012 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
39013 UNSPEC_FUSION_P9);
39015 set = gen_rtx_SET (target, new_mem);
39016 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39017 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39018 emit_insn (insn);
39020 return;
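/* As an illustration, the insn emitted above has the form:
        (parallel [(set (reg) (unspec [(mem)] UNSPEC_FUSION_P9))
                   (clobber (reg tmp))])
   so the addis temporary stays live until the fused sequence is output.  */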
39023 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39024 store sequence.
39026 The operands are:
39027 operands[0] register set with addis
39028 operands[1] value set via addis
39029 operands[2] target D-form memory being stored to
39030 operands[3] register being stored
39032 This is similar to the fusion introduced with power8, except it scales to
39033 both loads/stores and does not require the result register to be the same as
39034 the base register.  At the moment, we only do this if the register set with
39035 addis is dead.  */
39037 void
39038 expand_fusion_p9_store (rtx *operands)
39040 rtx tmp_reg = operands[0];
39041 rtx addis_value = operands[1];
39042 rtx orig_mem = operands[2];
39043 rtx src = operands[3];
39044 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
39045 enum rtx_code plus_or_lo_sum;
39046 machine_mode target_mode = GET_MODE (orig_mem);
39047 machine_mode ptr_mode = Pmode;
39049 gcc_assert (MEM_P (orig_mem));
39051 orig_addr = XEXP (orig_mem, 0);
39052 plus_or_lo_sum = GET_CODE (orig_addr);
39053 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39055 offset = XEXP (orig_addr, 1);
39056 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39057 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39059 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
39060 UNSPEC_FUSION_P9);
39062 set = gen_rtx_SET (new_mem, new_src);
39063 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39064 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39065 emit_insn (insn);
39067 return;
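/* As an illustration, the emitted insn is the store analogue of the fused
   load:
        (parallel [(set (mem) (unspec [(reg)] UNSPEC_FUSION_P9))
                   (clobber (reg tmp))])  */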
39070 /* Return a string to fuse an addis instruction with a load using extended
39071 fusion. The address that is used is the logical address that was formed
39072 during peephole2: (lo_sum (high) (low-part))
39074 The code is complicated, so we call output_asm_insn directly, and just
39075 return "". */
39077 const char *
39078 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
39080 machine_mode mode = GET_MODE (reg);
39081 rtx hi;
39082 rtx lo;
39083 rtx addr;
39084 const char *load_string;
39085 int r;
39087 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
39089 mem = XEXP (mem, 0);
39090 mode = GET_MODE (mem);
39093 if (GET_CODE (reg) == SUBREG)
39095 gcc_assert (SUBREG_BYTE (reg) == 0);
39096 reg = SUBREG_REG (reg);
39099 if (!REG_P (reg))
39100 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
39102 r = REGNO (reg);
39103 if (FP_REGNO_P (r))
39105 if (mode == SFmode)
39106 load_string = "lfs";
39107 else if (mode == DFmode || mode == DImode)
39108 load_string = "lfd";
39109 else
39110 gcc_unreachable ();
39112 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39114 if (mode == SFmode)
39115 load_string = "lxssp";
39116 else if (mode == DFmode || mode == DImode)
39117 load_string = "lxsd";
39118 else
39119 gcc_unreachable ();
39121 else if (INT_REGNO_P (r))
39123 switch (mode)
39125 case E_QImode:
39126 load_string = "lbz";
39127 break;
39128 case E_HImode:
39129 load_string = "lhz";
39130 break;
39131 case E_SImode:
39132 case E_SFmode:
39133 load_string = "lwz";
39134 break;
39135 case E_DImode:
39136 case E_DFmode:
39137 if (!TARGET_POWERPC64)
39138 gcc_unreachable ();
39139 load_string = "ld";
39140 break;
39141 default:
39142 gcc_unreachable ();
39145 else
39146 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
39148 if (!MEM_P (mem))
39149 fatal_insn ("emit_fusion_p9_load not MEM", mem);
39151 addr = XEXP (mem, 0);
39152 fusion_split_address (addr, &hi, &lo);
39154 /* Emit the addis instruction. */
39155 emit_fusion_addis (tmp_reg, hi);
39157 /* Emit the D-form load instruction. */
39158 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
39160 return "";
39163 /* Return a string to fuse an addis instruction with a store using extended
39164 fusion. The address that is used is the logical address that was formed
39165 during peephole2: (lo_sum (high) (low-part))
39167 The code is complicated, so we call output_asm_insn directly, and just
39168 return "". */
39170 const char *
39171 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
39173 machine_mode mode = GET_MODE (reg);
39174 rtx hi;
39175 rtx lo;
39176 rtx addr;
39177 const char *store_string;
39178 int r;
39180 if (GET_CODE (reg) == SUBREG)
39182 gcc_assert (SUBREG_BYTE (reg) == 0);
39183 reg = SUBREG_REG (reg);
39186 if (!REG_P (reg))
39187 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
39189 r = REGNO (reg);
39190 if (FP_REGNO_P (r))
39192 if (mode == SFmode)
39193 store_string = "stfs";
39194 else if (mode == DFmode)
39195 store_string = "stfd";
39196 else
39197 gcc_unreachable ();
39199 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39201 if (mode == SFmode)
39202 store_string = "stxssp";
39203 else if (mode == DFmode || mode == DImode)
39204 store_string = "stxsd";
39205 else
39206 gcc_unreachable ();
39208 else if (INT_REGNO_P (r))
39210 switch (mode)
39212 case E_QImode:
39213 store_string = "stb";
39214 break;
39215 case E_HImode:
39216 store_string = "sth";
39217 break;
39218 case E_SImode:
39219 case E_SFmode:
39220 store_string = "stw";
39221 break;
39222 case E_DImode:
39223 case E_DFmode:
39224 if (!TARGET_POWERPC64)
39225 gcc_unreachable ();
39226 store_string = "std";
39227 break;
39228 default:
39229 gcc_unreachable ();
39232 else
39233 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
39235 if (!MEM_P (mem))
39236 fatal_insn ("emit_fusion_p9_store not MEM", mem);
39238 addr = XEXP (mem, 0);
39239 fusion_split_address (addr, &hi, &lo);
39241 /* Emit the addis instruction. */
39242 emit_fusion_addis (tmp_reg, hi);
39244 /* Emit the D-form store instruction.  */
39245 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
39247 return "";
39250 #ifdef RS6000_GLIBC_ATOMIC_FENV
39251 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39252 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39253 #endif
39255 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39257 static void
39258 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39260 if (!TARGET_HARD_FLOAT)
39262 #ifdef RS6000_GLIBC_ATOMIC_FENV
39263 if (atomic_hold_decl == NULL_TREE)
39265 atomic_hold_decl
39266 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39267 get_identifier ("__atomic_feholdexcept"),
39268 build_function_type_list (void_type_node,
39269 double_ptr_type_node,
39270 NULL_TREE));
39271 TREE_PUBLIC (atomic_hold_decl) = 1;
39272 DECL_EXTERNAL (atomic_hold_decl) = 1;
39275 if (atomic_clear_decl == NULL_TREE)
39277 atomic_clear_decl
39278 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39279 get_identifier ("__atomic_feclearexcept"),
39280 build_function_type_list (void_type_node,
39281 NULL_TREE));
39282 TREE_PUBLIC (atomic_clear_decl) = 1;
39283 DECL_EXTERNAL (atomic_clear_decl) = 1;
39286 tree const_double = build_qualified_type (double_type_node,
39287 TYPE_QUAL_CONST);
39288 tree const_double_ptr = build_pointer_type (const_double);
39289 if (atomic_update_decl == NULL_TREE)
39291 atomic_update_decl
39292 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39293 get_identifier ("__atomic_feupdateenv"),
39294 build_function_type_list (void_type_node,
39295 const_double_ptr,
39296 NULL_TREE));
39297 TREE_PUBLIC (atomic_update_decl) = 1;
39298 DECL_EXTERNAL (atomic_update_decl) = 1;
39301 tree fenv_var = create_tmp_var_raw (double_type_node);
39302 TREE_ADDRESSABLE (fenv_var) = 1;
39303 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39305 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39306 *clear = build_call_expr (atomic_clear_decl, 0);
39307 *update = build_call_expr (atomic_update_decl, 1,
39308 fold_convert (const_double_ptr, fenv_addr));
39309 #endif
39310 return;
39313 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39314 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39315 tree call_mffs = build_call_expr (mffs, 0);
39317 /* Generates the equivalent of feholdexcept (&fenv_var)
39319 *fenv_var = __builtin_mffs ();
39320 double fenv_hold;
39321 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39322 __builtin_mtfsf (0xff, fenv_hold); */
39324 /* Mask to clear everything except for the rounding modes and non-IEEE
39325 arithmetic flag. */
39326 const unsigned HOST_WIDE_INT hold_exception_mask =
39327 HOST_WIDE_INT_C (0xffffffff00000007);
39329 tree fenv_var = create_tmp_var_raw (double_type_node);
39331 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39333 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39334 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39335 build_int_cst (uint64_type_node,
39336 hold_exception_mask));
39338 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39339 fenv_llu_and);
39341 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39342 build_int_cst (unsigned_type_node, 0xff),
39343 fenv_hold_mtfsf);
39345 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39347 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39349 double fenv_clear = __builtin_mffs ();
39350 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39351 __builtin_mtfsf (0xff, fenv_clear); */
39353 /* Mask to clear the entire FPSCR image, including the exception bits, the
39354 rounding modes, and the non-IEEE arithmetic flag.  */
39355 const unsigned HOST_WIDE_INT clear_exception_mask =
39356 HOST_WIDE_INT_C (0xffffffff00000000);
39358 tree fenv_clear = create_tmp_var_raw (double_type_node);
39360 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39362 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39363 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39364 fenv_clean_llu,
39365 build_int_cst (uint64_type_node,
39366 clear_exception_mask));
39368 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39369 fenv_clear_llu_and);
39371 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39372 build_int_cst (unsigned_type_node, 0xff),
39373 fenv_clear_mtfsf);
39375 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39377 /* Generates the equivalent of feupdateenv (&fenv_var)
39379 double old_fenv = __builtin_mffs ();
39380 double fenv_update;
39381 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39382 			       (*(uint64_t*)fenv_var & 0x1ff80fff);
39383 __builtin_mtfsf (0xff, fenv_update); */
39385 const unsigned HOST_WIDE_INT update_exception_mask =
39386 HOST_WIDE_INT_C (0xffffffff1fffff00);
39387 const unsigned HOST_WIDE_INT new_exception_mask =
39388 HOST_WIDE_INT_C (0x1ff80fff);
39390 tree old_fenv = create_tmp_var_raw (double_type_node);
39391 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39393 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39394 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39395 build_int_cst (uint64_type_node,
39396 update_exception_mask));
39398 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39399 build_int_cst (uint64_type_node,
39400 new_exception_mask));
39402 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39403 old_llu_and, new_llu_and);
39405 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39406 new_llu_mask);
39408 tree update_mtfsf = build_call_expr (mtfsf, 2,
39409 build_int_cst (unsigned_type_node, 0xff),
39410 fenv_update_mtfsf);
39412 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
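/* As an illustrative summary, the hold/clear/update sequences built above
   bracket the compare-and-swap loop generated for a C11 atomic floating
   point compound assignment, so that exceptions raised by failed iterations
   are discarded and only the final iteration's exceptions remain raised.  */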
39415 void
39416 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39418 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39420 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39421 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39423 /* The destination of the vmrgew instruction layout is:
39424 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39425 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39426 vmrgew instruction will be correct. */
39427 if (VECTOR_ELT_ORDER_BIG)
39429 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39430 GEN_INT (0)));
39431 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39432 GEN_INT (3)));
39434 else
39436 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39437 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39440 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39441 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39443 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39444 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39446 if (VECTOR_ELT_ORDER_BIG)
39447 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39448 else
39449 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
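/* As an illustration (assuming the vec_float2 element ordering described in
   the PVIPR): given vector doubles {a0, a1} and {b0, b1}, the xxpermdi,
   double-to-single conversion, and vmrgew steps above produce the vector
   float {a0, a1, b0, b1}.  */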
39452 void
39453 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39455 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39457 rtx_tmp0 = gen_reg_rtx (V2DImode);
39458 rtx_tmp1 = gen_reg_rtx (V2DImode);
39460 /* The destination of the vmrgew instruction layout is:
39461 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39462 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39463 vmrgew instruction will be correct. */
39464 if (VECTOR_ELT_ORDER_BIG)
39466 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39467 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39469 else
39471 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39472 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39475 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39476 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39478 if (signed_convert)
39480 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39481 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39483 else
39485 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39486 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39489 if (VECTOR_ELT_ORDER_BIG)
39490 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39491 else
39492 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
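/* As an illustration, this is the integer analogue used for vec_float2 on
   vector (un)signed long long: the V2DI inputs are paired with xxpermdi,
   converted with xvcvsxdsp or xvcvuxdsp, and interleaved with vmrgew.  */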
39495 void
39496 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39497 rtx src2)
39499 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39501 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39502 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39504 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39505 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39507 rtx_tmp2 = gen_reg_rtx (V4SImode);
39508 rtx_tmp3 = gen_reg_rtx (V4SImode);
39510 if (signed_convert)
39512 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39513 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39515 else
39517 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39518 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39521 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
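/* As an illustration (assuming the PVIPR element ordering), this implements
   vec_signed2 and vec_unsigned2: vec_signed2 ({a0, a1}, {b0, b1}) converts
   two vector doubles into the single vector of 32-bit integers
   {(int) a0, (int) a1, (int) b0, (int) b1}.  */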
39524 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39526 static bool
39527 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39528 optimization_type opt_type)
39530 switch (op)
39532 case rsqrt_optab:
39533 return (opt_type == OPTIMIZE_FOR_SPEED
39534 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39536 default:
39537 return true;
39541 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39543 static HOST_WIDE_INT
39544 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39546 if (TREE_CODE (exp) == STRING_CST
39547 && (STRICT_ALIGNMENT || !optimize_size))
39548 return MAX (align, BITS_PER_WORD);
39549 return align;
39552 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39554 static HOST_WIDE_INT
39555 rs6000_starting_frame_offset (void)
39557 if (FRAME_GROWS_DOWNWARD)
39558 return 0;
39559 return RS6000_STARTING_FRAME_OFFSET;
39562 struct gcc_target targetm = TARGET_INITIALIZER;
39564 #include "gt-rs6000.h"