/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
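/* Editorial note: like any such macros, min and max evaluate their
   arguments more than once, so callers must avoid side effects;
   e.g. min (x++, y) would increment X twice whenever X < Y.  */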
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	  PPC_PLATFORM_POWER9 },
  { "power8",	  PPC_PLATFORM_POWER8 },
  { "power7",	  PPC_PLATFORM_POWER7 },
  { "power6x",	  PPC_PLATFORM_POWER6X },
  { "power6",	  PPC_PLATFORM_POWER6 },
  { "power5+",	  PPC_PLATFORM_POWER5_PLUS },
  { "power5",	  PPC_PLATFORM_POWER5 },
  { "ppc970",	  PPC_PLATFORM_PPC970 },
  { "power4",	  PPC_PLATFORM_POWER4 },
  { "ppca2",	  PPC_PLATFORM_PPCA2 },
  { "ppc476",	  PPC_PLATFORM_PPC476 },
  { "ppc464",	  PPC_PLATFORM_PPC464 },
  { "ppc440",	  PPC_PLATFORM_PPC440 },
  { "ppc405",	  PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },					/* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },		/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },		/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or legitimate address
   checking.  We only need to worry about GPR, FPR, and Altivec registers
   here, along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
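/* Illustrative example (the real masks are computed when target options are
   processed): a mode that is valid in GPRs with both reg+reg and reg+offset
   addressing would have an addr_mask entry of (RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) in its RELOAD_REG_GPR slot; the
   helper predicates below simply test these bits, usually in the combined
   RELOAD_REG_ANY slot.  */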
/* Register type masks based on the type, of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
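/* Illustrative examples of the screening above (editorial): an OUT_INSN
   whose pattern is a PARALLEL containing a SET plus a CLOBBER passes the
   checks and is handed on to the generic store_data_bypass_p, while a
   PARALLEL containing any element other than SETs, CLOBBERs, and USEs
   makes this wrapper return false instead of risking an assertion
   failure in the generic routine.  */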
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};
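/* Editorial note on the Cell entries above: COSTS_N_INSNS does integer
   arithmetic (rtl.h defines it as ((N) * 4)), so COSTS_N_INSNS (9/2) + 2
   evaluates as COSTS_N_INSNS (4) + 2 = 18, i.e. roughly 4.5 "adds"; the
   trailing + 2 recovers the half instruction that 9/2 truncates away.  */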
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "mq", "lr", "ctr", "ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
 "ca",
 /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
   "mq",   "lr",  "ctr",   "ap",
 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
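/* Worked example (editorial): ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is
   0x80000000, selecting %v0 in the most significant bit, and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 for %v31.  */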
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1703 #undef TARGET_INIT_BUILTINS
1704 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1705 #undef TARGET_BUILTIN_DECL
1706 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1708 #undef TARGET_FOLD_BUILTIN
1709 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1710 #undef TARGET_GIMPLE_FOLD_BUILTIN
1711 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1713 #undef TARGET_EXPAND_BUILTIN
1714 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1716 #undef TARGET_MANGLE_TYPE
1717 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1719 #undef TARGET_INIT_LIBFUNCS
1720 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1722 #if TARGET_MACHO
1723 #undef TARGET_BINDS_LOCAL_P
1724 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1725 #endif
1727 #undef TARGET_MS_BITFIELD_LAYOUT_P
1728 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1730 #undef TARGET_ASM_OUTPUT_MI_THUNK
1731 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1733 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1734 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1736 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1737 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1739 #undef TARGET_REGISTER_MOVE_COST
1740 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1741 #undef TARGET_MEMORY_MOVE_COST
1742 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1743 #undef TARGET_CANNOT_COPY_INSN_P
1744 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1745 #undef TARGET_RTX_COSTS
1746 #define TARGET_RTX_COSTS rs6000_rtx_costs
1747 #undef TARGET_ADDRESS_COST
1748 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1749 #undef TARGET_INSN_COST
1750 #define TARGET_INSN_COST rs6000_insn_cost
1752 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1753 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1755 #undef TARGET_PROMOTE_FUNCTION_MODE
1756 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1758 #undef TARGET_RETURN_IN_MEMORY
1759 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1761 #undef TARGET_RETURN_IN_MSB
1762 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1764 #undef TARGET_SETUP_INCOMING_VARARGS
1765 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1767 /* Always strict argument naming on rs6000. */
1768 #undef TARGET_STRICT_ARGUMENT_NAMING
1769 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1770 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1771 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1772 #undef TARGET_SPLIT_COMPLEX_ARG
1773 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1774 #undef TARGET_MUST_PASS_IN_STACK
1775 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1776 #undef TARGET_PASS_BY_REFERENCE
1777 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1778 #undef TARGET_ARG_PARTIAL_BYTES
1779 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1780 #undef TARGET_FUNCTION_ARG_ADVANCE
1781 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1782 #undef TARGET_FUNCTION_ARG
1783 #define TARGET_FUNCTION_ARG rs6000_function_arg
1784 #undef TARGET_FUNCTION_ARG_PADDING
1785 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1786 #undef TARGET_FUNCTION_ARG_BOUNDARY
1787 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1789 #undef TARGET_BUILD_BUILTIN_VA_LIST
1790 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1792 #undef TARGET_EXPAND_BUILTIN_VA_START
1793 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1795 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1796 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1798 #undef TARGET_EH_RETURN_FILTER_MODE
1799 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1801 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1802 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1804 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1805 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1807 #undef TARGET_FLOATN_MODE
1808 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1810 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1811 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1813 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1814 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1816 #undef TARGET_MD_ASM_ADJUST
1817 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1819 #undef TARGET_OPTION_OVERRIDE
1820 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1822 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1823 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1824 rs6000_builtin_vectorized_function
1826 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1827 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1828 rs6000_builtin_md_vectorized_function
1830 #undef TARGET_STACK_PROTECT_GUARD
1831 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1833 #if !TARGET_MACHO
1834 #undef TARGET_STACK_PROTECT_FAIL
1835 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1836 #endif
1838 #ifdef HAVE_AS_TLS
1839 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1840 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1841 #endif
1843 /* Use a 32-bit anchor range. This leads to sequences like:
1845 addis tmp,anchor,high
1846 add dest,tmp,low
1848 where tmp itself acts as an anchor, and can be shared between
1849 accesses to the same 64k page. */
1850 #undef TARGET_MIN_ANCHOR_OFFSET
1851 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1852 #undef TARGET_MAX_ANCHOR_OFFSET
1853 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
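/* Note: the minimum is written as -0x7fffffff - 1 rather than -0x80000000
   because 0x80000000 does not fit in a 32-bit signed int; the literal would
   have unsigned type and the negation would not produce INT_MIN.  A minimal
   demonstration, assuming 32-bit int:  */
#if 0
int anchor_min = -0x7fffffff - 1;   /* INT_MIN, i.e. -2147483648.  */
/* int bad = -0x80000000;  -- here 0x80000000 is an unsigned int, so unary
   minus is evaluated in unsigned arithmetic and yields 0x80000000 again
   before the (implementation-defined) conversion back to int.  */
#endif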
1854 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1855 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1856 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1857 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1859 #undef TARGET_BUILTIN_RECIPROCAL
1860 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1862 #undef TARGET_SECONDARY_RELOAD
1863 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1864 #undef TARGET_SECONDARY_MEMORY_NEEDED
1865 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1866 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1867 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1869 #undef TARGET_LEGITIMATE_ADDRESS_P
1870 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1872 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1873 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1875 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1876 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1878 #undef TARGET_CAN_ELIMINATE
1879 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1881 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1882 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1884 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1885 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1887 #undef TARGET_TRAMPOLINE_INIT
1888 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1890 #undef TARGET_FUNCTION_VALUE
1891 #define TARGET_FUNCTION_VALUE rs6000_function_value
1893 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1894 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1896 #undef TARGET_OPTION_SAVE
1897 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1899 #undef TARGET_OPTION_RESTORE
1900 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1902 #undef TARGET_OPTION_PRINT
1903 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1905 #undef TARGET_CAN_INLINE_P
1906 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1908 #undef TARGET_SET_CURRENT_FUNCTION
1909 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1911 #undef TARGET_LEGITIMATE_CONSTANT_P
1912 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1914 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1915 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1917 #undef TARGET_CAN_USE_DOLOOP_P
1918 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1920 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1921 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1923 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1924 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1925 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1926 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1927 #undef TARGET_UNWIND_WORD_MODE
1928 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1930 #undef TARGET_OFFLOAD_OPTIONS
1931 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1933 #undef TARGET_C_MODE_FOR_SUFFIX
1934 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1936 #undef TARGET_INVALID_BINARY_OP
1937 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1939 #undef TARGET_OPTAB_SUPPORTED_P
1940 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1942 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1943 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1945 #undef TARGET_COMPARE_VERSION_PRIORITY
1946 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1948 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1949 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1950 rs6000_generate_version_dispatcher_body
1952 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1953 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1954 rs6000_get_function_versions_dispatcher
1956 #undef TARGET_OPTION_FUNCTION_VERSIONS
1957 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1959 #undef TARGET_HARD_REGNO_NREGS
1960 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1961 #undef TARGET_HARD_REGNO_MODE_OK
1962 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1964 #undef TARGET_MODES_TIEABLE_P
1965 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1967 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1968 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1969 rs6000_hard_regno_call_part_clobbered
1971 #undef TARGET_SLOW_UNALIGNED_ACCESS
1972 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1974 #undef TARGET_CAN_CHANGE_MODE_CLASS
1975 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1977 #undef TARGET_CONSTANT_ALIGNMENT
1978 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1980 #undef TARGET_STARTING_FRAME_OFFSET
1981 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1984 /* Processor table. */
1985 struct rs6000_ptt
1987 const char *const name; /* Canonical processor name. */
1988 const enum processor_type processor; /* Processor type enum value. */
1989 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1992 static struct rs6000_ptt const processor_target_table[] =
1994 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1995 #include "rs6000-cpus.def"
1996 #undef RS6000_CPU
1999 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2000 name is invalid. */
2002 static int
2003 rs6000_cpu_name_lookup (const char *name)
2005 size_t i;
2007 if (name != NULL)
2009 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2010 if (! strcmp (name, processor_target_table[i].name))
2011 return (int)i;
2014 return -1;
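/* Illustrative usage sketch (not part of the build): validating a -mcpu=
   string against the processor table.  A negative return means the name
   was not recognized.  */
#if 0
static bool
example_validate_cpu (const char *cpu_opt)
{
  int index = rs6000_cpu_name_lookup (cpu_opt); /* e.g. "power9".  */
  return index >= 0;                            /* -1 -> unknown cpu.  */
}
#endif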
2018 /* Return number of consecutive hard regs needed starting at reg REGNO
2019 to hold something of mode MODE.
2020 This is ordinarily the length in words of a value of mode MODE
2021 but can be less for certain modes in special long registers.
2023 POWER and PowerPC GPRs hold 32 bits worth;
2024 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2026 static int
2027 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2029 unsigned HOST_WIDE_INT reg_size;
2031 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2032 128-bit floating point that can go in vector registers, which has VSX
2033 memory addressing. */
2034 if (FP_REGNO_P (regno))
2035 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2036 ? UNITS_PER_VSX_WORD
2037 : UNITS_PER_FP_WORD);
2039 else if (ALTIVEC_REGNO_P (regno))
2040 reg_size = UNITS_PER_ALTIVEC_WORD;
2042 else
2043 reg_size = UNITS_PER_WORD;
2045 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
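/* Worked example of the rounding division above: a 16-byte IBM long double
   (TFmode with -mabi=ibmlongdouble) held in 8-byte FPRs needs
   (16 + 8 - 1) / 8 = 2 registers, while a 4-byte SFmode value in the same
   registers needs (4 + 8 - 1) / 8 = 1.  */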
2048 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2049 MODE. */
2050 static int
2051 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2053 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2055 if (COMPLEX_MODE_P (mode))
2056 mode = GET_MODE_INNER (mode);
2058 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2059 register combinations, and PTImode is the mode we use for them. Don't
2060 allow quad words in the argument or frame pointer registers, just
2061 registers 0..31. */
2062 if (mode == PTImode)
2063 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2064 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2065 && ((regno & 1) == 0));
2067 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2068 implementations. Don't allow an item to be split between a FP register
2069 and an Altivec register. Allow TImode in all VSX registers if the user
2070 asked for it. */
2071 if (TARGET_VSX && VSX_REGNO_P (regno)
2072 && (VECTOR_MEM_VSX_P (mode)
2073 || FLOAT128_VECTOR_P (mode)
2074 || reg_addr[mode].scalar_in_vmx_p
2075 || mode == TImode
2076 || (TARGET_VADDUQM && mode == V1TImode)))
2078 if (FP_REGNO_P (regno))
2079 return FP_REGNO_P (last_regno);
2081 if (ALTIVEC_REGNO_P (regno))
2083 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2084 return 0;
2086 return ALTIVEC_REGNO_P (last_regno);
2090 /* The GPRs can hold any mode, but values bigger than one register
2091 cannot go past R31. */
2092 if (INT_REGNO_P (regno))
2093 return INT_REGNO_P (last_regno);
2095 /* The float registers (except for VSX vector modes) can only hold floating
2096 modes and DImode. */
2097 if (FP_REGNO_P (regno))
2099 if (FLOAT128_VECTOR_P (mode))
2100 return false;
2102 if (SCALAR_FLOAT_MODE_P (mode)
2103 && (mode != TDmode || (regno % 2) == 0)
2104 && FP_REGNO_P (last_regno))
2105 return 1;
2107 if (GET_MODE_CLASS (mode) == MODE_INT)
2109 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2110 return 1;
2112 if (TARGET_P8_VECTOR && (mode == SImode))
2113 return 1;
2115 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2116 return 1;
2119 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2120 && PAIRED_VECTOR_MODE (mode))
2121 return 1;
2123 return 0;
2126 /* The CR registers can only hold CC modes. */
2127 if (CR_REGNO_P (regno))
2128 return GET_MODE_CLASS (mode) == MODE_CC;
2130 if (CA_REGNO_P (regno))
2131 return mode == Pmode || mode == SImode;
2133 /* AltiVec modes can go only in AltiVec registers. */
2134 if (ALTIVEC_REGNO_P (regno))
2135 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2136 || mode == V1TImode);
2138 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2139 registers, and the value must fit within the register set. */
2141 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
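/* Some concrete consequences of the checks above, assuming the usual
   internal register numbering: PTImode is accepted in r10 but rejected in
   r11 (even/odd pairing); TDmode is accepted in f2 but rejected in f3
   (even FPR requirement); and on a 32-bit target a DImode value is
   rejected in r31 because its second register would fall past the last
   GPR.  */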
2144 /* Implement TARGET_HARD_REGNO_NREGS. */
2146 static unsigned int
2147 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2149 return rs6000_hard_regno_nregs[mode][regno];
2152 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2154 static bool
2155 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2157 return rs6000_hard_regno_mode_ok_p[mode][regno];
2160 /* Implement TARGET_MODES_TIEABLE_P.
2162 PTImode cannot tie with other modes because PTImode is restricted to even
2163 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2164 57744).
2166 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2167 128-bit floating point on VSX systems ties with other vectors. */
2169 static bool
2170 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2172 if (mode1 == PTImode)
2173 return mode2 == PTImode;
2174 if (mode2 == PTImode)
2175 return false;
2177 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2178 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2179 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2180 return false;
2182 if (SCALAR_FLOAT_MODE_P (mode1))
2183 return SCALAR_FLOAT_MODE_P (mode2);
2184 if (SCALAR_FLOAT_MODE_P (mode2))
2185 return false;
2187 if (GET_MODE_CLASS (mode1) == MODE_CC)
2188 return GET_MODE_CLASS (mode2) == MODE_CC;
2189 if (GET_MODE_CLASS (mode2) == MODE_CC)
2190 return false;
2192 if (PAIRED_VECTOR_MODE (mode1))
2193 return PAIRED_VECTOR_MODE (mode2);
2194 if (PAIRED_VECTOR_MODE (mode2))
2195 return false;
2197 return true;
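/* Examples of the ordering above: V2DFmode ties with V4SImode (both are
   AltiVec/VSX vector modes), while DFmode and DImode do not tie, since
   only one of them is a scalar float mode.  PTImode ties only with
   itself, so a TImode pseudo never shares a hard register assignment with
   a PTImode value.  */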
2200 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2202 static bool
2203 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2217 return false;
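/* Example of the first test above, assuming -m32 -mpowerpc64: a DImode
   value can live in the full 64-bit call-saved r30, but the 32-bit ABI
   only preserves the low 32 bits of r30 across calls, so the value is
   partially clobbered and must be spilled around calls.  */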
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2224 int r, m;
2226 for (r = first_regno; r <= last_regno; ++r)
2228 const char *comma = "";
2229 int len;
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2240 if (len > 70)
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2253 comma = ", ";
2256 if (call_used_regs[r])
2258 if (len > 70)
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2269 if (fixed_regs[r])
2271 if (len > 70)
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2282 if (len > 70)
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2292 if (len > 70)
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2305 const char *ret;
2307 switch (v)
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 case VECTOR_PAIRED: ret = "paired"; break;
2314 case VECTOR_OTHER: ret = "other"; break;
2315 default: ret = "unknown"; break;
2318 return ret;
2321 /* Inner function printing just the address mask for a particular reload
2322 register class. */
2323 DEBUG_FUNCTION char *
2324 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2326 static char ret[8];
2327 char *p = ret;
2329 if ((mask & RELOAD_REG_VALID) != 0)
2330 *p++ = 'v';
2331 else if (keep_spaces)
2332 *p++ = ' ';
2334 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2335 *p++ = 'm';
2336 else if (keep_spaces)
2337 *p++ = ' ';
2339 if ((mask & RELOAD_REG_INDEXED) != 0)
2340 *p++ = 'i';
2341 else if (keep_spaces)
2342 *p++ = ' ';
2344 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2345 *p++ = 'O';
2346 else if ((mask & RELOAD_REG_OFFSET) != 0)
2347 *p++ = 'o';
2348 else if (keep_spaces)
2349 *p++ = ' ';
2351 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2352 *p++ = '+';
2353 else if (keep_spaces)
2354 *p++ = ' ';
2356 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2357 *p++ = '+';
2358 else if (keep_spaces)
2359 *p++ = ' ';
2361 if ((mask & RELOAD_REG_AND_M16) != 0)
2362 *p++ = '&';
2363 else if (keep_spaces)
2364 *p++ = ' ';
2366 *p = '\0';
2368 return ret;
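/* Sample decoding (illustrative): with keep_spaces false, a mask with
   RELOAD_REG_VALID, RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and
   RELOAD_REG_PRE_INCDEC set comes back as "vio+", i.e. the register class
   is valid for the mode and allows REG+REG, REG+OFFSET, and
   PRE_INC/PRE_DEC addressing.  */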
2371 /* Print the address masks in a human-readable fashion. */
2372 DEBUG_FUNCTION void
2373 rs6000_debug_print_mode (ssize_t m)
2375 ssize_t rc;
2376 int spaces = 0;
2377 bool fuse_extra_p;
2379 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2380 for (rc = 0; rc < N_RELOAD_REG; rc++)
2381 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2382 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2384 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2385 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2386 fprintf (stderr, " Reload=%c%c",
2387 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2388 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2389 else
2390 spaces += sizeof (" Reload=sl") - 1;
2392 if (reg_addr[m].scalar_in_vmx_p)
2394 fprintf (stderr, "%*s Upper=y", spaces, "");
2395 spaces = 0;
2397 else
2398 spaces += sizeof (" Upper=y") - 1;
2400 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2401 || reg_addr[m].fused_toc);
2402 if (!fuse_extra_p)
2404 for (rc = 0; rc < N_RELOAD_REG; rc++)
2406 if (rc != RELOAD_REG_ANY)
2408 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2410 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2411 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2412 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2414 fuse_extra_p = true;
2415 break;
2421 if (fuse_extra_p)
2423 fprintf (stderr, "%*s Fuse:", spaces, "");
2424 spaces = 0;
2426 for (rc = 0; rc < N_RELOAD_REG; rc++)
2428 if (rc != RELOAD_REG_ANY)
2430 char load, store;
2432 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2433 load = 'l';
2434 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2435 load = 'L';
2436 else
2437 load = '-';
2439 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2440 store = 's';
2441 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2442 store = 'S';
2443 else
2444 store = '-';
2446 if (load == '-' && store == '-')
2447 spaces += 5;
2448 else
2450 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2451 reload_reg_map[rc].name[0], load, store);
2452 spaces = 0;
2457 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2459 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2460 spaces = 0;
2462 else
2463 spaces += sizeof (" P8gpr") - 1;
2465 if (reg_addr[m].fused_toc)
2467 fprintf (stderr, "%*sToc", (spaces + 1), "");
2468 spaces = 0;
2470 else
2471 spaces += sizeof (" Toc") - 1;
2473 else
2474 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2476 if (rs6000_vector_unit[m] != VECTOR_NONE
2477 || rs6000_vector_mem[m] != VECTOR_NONE)
2479 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2480 spaces, "",
2481 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2482 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2485 fputs ("\n", stderr);
2488 #define DEBUG_FMT_ID "%-32s= "
2489 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2490 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2491 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
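/* Example of the formats above (illustrative): DEBUG_FMT_S applied as
   fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2") prints the name
   left-justified in a 32-column field before the '=':

	abi                             = ELFv2
*/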
2493 /* Print various interesting information with -mdebug=reg. */
2494 static void
2495 rs6000_debug_reg_global (void)
2497 static const char *const tf[2] = { "false", "true" };
2498 const char *nl = (const char *)0;
2499 int m;
2500 size_t m1, m2, v;
2501 char costly_num[20];
2502 char nop_num[20];
2503 char flags_buffer[40];
2504 const char *costly_str;
2505 const char *nop_str;
2506 const char *trace_str;
2507 const char *abi_str;
2508 const char *cmodel_str;
2509 struct cl_target_option cl_opts;
2511 /* Modes we want tieable information on. */
2512 static const machine_mode print_tieable_modes[] = {
2513 QImode,
2514 HImode,
2515 SImode,
2516 DImode,
2517 TImode,
2518 PTImode,
2519 SFmode,
2520 DFmode,
2521 TFmode,
2522 IFmode,
2523 KFmode,
2524 SDmode,
2525 DDmode,
2526 TDmode,
2527 V2SImode,
2528 V16QImode,
2529 V8HImode,
2530 V4SImode,
2531 V2DImode,
2532 V1TImode,
2533 V32QImode,
2534 V16HImode,
2535 V8SImode,
2536 V4DImode,
2537 V2TImode,
2538 V2SFmode,
2539 V4SFmode,
2540 V2DFmode,
2541 V8SFmode,
2542 V4DFmode,
2543 CCmode,
2544 CCUNSmode,
2545 CCEQmode,
2548 /* Virtual regs we are interested in. */
2549 static const struct {
2550 int regno; /* register number. */
2551 const char *name; /* register name. */
2552 } virtual_regs[] = {
2553 { STACK_POINTER_REGNUM, "stack pointer:" },
2554 { TOC_REGNUM, "toc: " },
2555 { STATIC_CHAIN_REGNUM, "static chain: " },
2556 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2557 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2558 { ARG_POINTER_REGNUM, "arg pointer: " },
2559 { FRAME_POINTER_REGNUM, "frame pointer:" },
2560 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2561 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2562 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2563 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2564 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2565 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2566 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2567 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2568 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2571 fputs ("\nHard register information:\n", stderr);
2572 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2573 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2574 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2575 LAST_ALTIVEC_REGNO,
2576 "vs");
2577 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2578 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2579 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2580 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2581 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2582 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2584 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2585 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2586 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2588 fprintf (stderr,
2589 "\n"
2590 "d reg_class = %s\n"
2591 "f reg_class = %s\n"
2592 "v reg_class = %s\n"
2593 "wa reg_class = %s\n"
2594 "wb reg_class = %s\n"
2595 "wd reg_class = %s\n"
2596 "we reg_class = %s\n"
2597 "wf reg_class = %s\n"
2598 "wg reg_class = %s\n"
2599 "wh reg_class = %s\n"
2600 "wi reg_class = %s\n"
2601 "wj reg_class = %s\n"
2602 "wk reg_class = %s\n"
2603 "wl reg_class = %s\n"
2604 "wm reg_class = %s\n"
2605 "wo reg_class = %s\n"
2606 "wp reg_class = %s\n"
2607 "wq reg_class = %s\n"
2608 "wr reg_class = %s\n"
2609 "ws reg_class = %s\n"
2610 "wt reg_class = %s\n"
2611 "wu reg_class = %s\n"
2612 "wv reg_class = %s\n"
2613 "ww reg_class = %s\n"
2614 "wx reg_class = %s\n"
2615 "wy reg_class = %s\n"
2616 "wz reg_class = %s\n"
2617 "wA reg_class = %s\n"
2618 "wH reg_class = %s\n"
2619 "wI reg_class = %s\n"
2620 "wJ reg_class = %s\n"
2621 "wK reg_class = %s\n"
2622 "\n",
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2651 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2652 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2653 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2654 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2656 nl = "\n";
2657 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2658 rs6000_debug_print_mode (m);
2660 fputs ("\n", stderr);
2662 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2664 machine_mode mode1 = print_tieable_modes[m1];
2665 bool first_time = true;
2667 nl = (const char *)0;
2668 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2670 machine_mode mode2 = print_tieable_modes[m2];
2671 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2673 if (first_time)
2675 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2676 nl = "\n";
2677 first_time = false;
2680 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2684 if (!first_time)
2685 fputs ("\n", stderr);
2688 if (nl)
2689 fputs (nl, stderr);
2691 if (rs6000_recip_control)
2693 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2695 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2696 if (rs6000_recip_bits[m])
2698 fprintf (stderr,
2699 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2700 GET_MODE_NAME (m),
2701 (RS6000_RECIP_AUTO_RE_P (m)
2702 ? "auto"
2703 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2704 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2705 ? "auto"
2706 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2709 fputs ("\n", stderr);
2712 if (rs6000_cpu_index >= 0)
2714 const char *name = processor_target_table[rs6000_cpu_index].name;
2715 HOST_WIDE_INT flags
2716 = processor_target_table[rs6000_cpu_index].target_enable;
2718 sprintf (flags_buffer, "-mcpu=%s flags", name);
2719 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2721 else
2722 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2724 if (rs6000_tune_index >= 0)
2726 const char *name = processor_target_table[rs6000_tune_index].name;
2727 HOST_WIDE_INT flags
2728 = processor_target_table[rs6000_tune_index].target_enable;
2730 sprintf (flags_buffer, "-mtune=%s flags", name);
2731 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2733 else
2734 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2736 cl_target_option_save (&cl_opts, &global_options);
2737 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2738 rs6000_isa_flags);
2740 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2741 rs6000_isa_flags_explicit);
2743 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2744 rs6000_builtin_mask);
2746 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2748 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2749 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2751 switch (rs6000_sched_costly_dep)
2753 case max_dep_latency:
2754 costly_str = "max_dep_latency";
2755 break;
2757 case no_dep_costly:
2758 costly_str = "no_dep_costly";
2759 break;
2761 case all_deps_costly:
2762 costly_str = "all_deps_costly";
2763 break;
2765 case true_store_to_load_dep_costly:
2766 costly_str = "true_store_to_load_dep_costly";
2767 break;
2769 case store_to_load_dep_costly:
2770 costly_str = "store_to_load_dep_costly";
2771 break;
2773 default:
2774 costly_str = costly_num;
2775 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2776 break;
2779 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2781 switch (rs6000_sched_insert_nops)
2783 case sched_finish_regroup_exact:
2784 nop_str = "sched_finish_regroup_exact";
2785 break;
2787 case sched_finish_pad_groups:
2788 nop_str = "sched_finish_pad_groups";
2789 break;
2791 case sched_finish_none:
2792 nop_str = "sched_finish_none";
2793 break;
2795 default:
2796 nop_str = nop_num;
2797 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2798 break;
2801 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2803 switch (rs6000_sdata)
2805 default:
2806 case SDATA_NONE:
2807 break;
2809 case SDATA_DATA:
2810 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2811 break;
2813 case SDATA_SYSV:
2814 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2815 break;
2817 case SDATA_EABI:
2818 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2819 break;
2823 switch (rs6000_traceback)
2825 case traceback_default: trace_str = "default"; break;
2826 case traceback_none: trace_str = "none"; break;
2827 case traceback_part: trace_str = "part"; break;
2828 case traceback_full: trace_str = "full"; break;
2829 default: trace_str = "unknown"; break;
2832 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2834 switch (rs6000_current_cmodel)
2836 case CMODEL_SMALL: cmodel_str = "small"; break;
2837 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2838 case CMODEL_LARGE: cmodel_str = "large"; break;
2839 default: cmodel_str = "unknown"; break;
2842 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2844 switch (rs6000_current_abi)
2846 case ABI_NONE: abi_str = "none"; break;
2847 case ABI_AIX: abi_str = "aix"; break;
2848 case ABI_ELFv2: abi_str = "ELFv2"; break;
2849 case ABI_V4: abi_str = "V4"; break;
2850 case ABI_DARWIN: abi_str = "darwin"; break;
2851 default: abi_str = "unknown"; break;
2854 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2856 if (rs6000_altivec_abi)
2857 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2859 if (rs6000_darwin64_abi)
2860 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2862 fprintf (stderr, DEBUG_FMT_S, "single_float",
2863 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2865 fprintf (stderr, DEBUG_FMT_S, "double_float",
2866 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2868 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2869 (TARGET_SOFT_FLOAT ? "true" : "false"));
2871 if (TARGET_LINK_STACK)
2872 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2874 if (TARGET_P8_FUSION)
2876 char options[80];
2878 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2879 if (TARGET_TOC_FUSION)
2880 strcat (options, ", toc");
2882 if (TARGET_P8_FUSION_SIGN)
2883 strcat (options, ", sign");
2885 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2888 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2889 TARGET_SECURE_PLT ? "secure" : "bss");
2890 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2891 aix_struct_return ? "aix" : "sysv");
2892 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2893 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2894 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2895 tf[!!rs6000_align_branch_targets]);
2896 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2897 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2898 rs6000_long_double_type_size);
2899 if (rs6000_long_double_type_size == 128)
2901 fprintf (stderr, DEBUG_FMT_S, "long double type",
2902 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2903 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2904 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2906 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2907 (int)rs6000_sched_restricted_insns_priority);
2908 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2909 (int)END_BUILTINS);
2910 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2911 (int)RS6000_BUILTIN_COUNT);
2913 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2914 (int)TARGET_FLOAT128_ENABLE_TYPE);
2916 if (TARGET_VSX)
2917 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2918 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2920 if (TARGET_DIRECT_MOVE_128)
2921 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2922 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2926 /* Update the addr mask bits in reg_addr to help the secondary reload and
2927 legitimate address (GO_IF_LEGITIMATE_ADDRESS) support figure out the
2928 appropriate addressing to use. */
2930 static void
2931 rs6000_setup_reg_addr_masks (void)
2933 ssize_t rc, reg, m, nregs;
2934 addr_mask_type any_addr_mask, addr_mask;
2936 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2938 machine_mode m2 = (machine_mode) m;
2939 bool complex_p = false;
2940 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2941 size_t msize;
2943 if (COMPLEX_MODE_P (m2))
2945 complex_p = true;
2946 m2 = GET_MODE_INNER (m2);
2949 msize = GET_MODE_SIZE (m2);
2951 /* SDmode is special in that we want to access it only via REG+REG
2952 addressing on power7 and above, since we want to use the LFIWZX and
2953 STFIWZX instructions to load it. */
2954 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2956 any_addr_mask = 0;
2957 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2959 addr_mask = 0;
2960 reg = reload_reg_map[rc].reg;
2962 /* Can mode values go in the GPR/FPR/Altivec registers? */
2963 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2965 bool small_int_vsx_p = (small_int_p
2966 && (rc == RELOAD_REG_FPR
2967 || rc == RELOAD_REG_VMX));
2969 nregs = rs6000_hard_regno_nregs[m][reg];
2970 addr_mask |= RELOAD_REG_VALID;
2972 /* Indicate if the mode takes more than 1 physical register. If
2973 it takes a single register, indicate it can do REG+REG
2974 addressing. Small integers in VSX registers can only do
2975 REG+REG addressing. */
2976 if (small_int_vsx_p)
2977 addr_mask |= RELOAD_REG_INDEXED;
2978 else if (nregs > 1 || m == BLKmode || complex_p)
2979 addr_mask |= RELOAD_REG_MULTIPLE;
2980 else
2981 addr_mask |= RELOAD_REG_INDEXED;
2983 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2984 addressing. If we allow scalars into Altivec registers,
2985 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2987 if (TARGET_UPDATE
2988 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2989 && msize <= 8
2990 && !VECTOR_MODE_P (m2)
2991 && !FLOAT128_VECTOR_P (m2)
2992 && !complex_p
2993 && !small_int_vsx_p)
2995 addr_mask |= RELOAD_REG_PRE_INCDEC;
2997 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2998 we don't allow PRE_MODIFY for some multi-register
2999 operations. */
3000 switch (m)
3002 default:
3003 addr_mask |= RELOAD_REG_PRE_MODIFY;
3004 break;
3006 case E_DImode:
3007 if (TARGET_POWERPC64)
3008 addr_mask |= RELOAD_REG_PRE_MODIFY;
3009 break;
3011 case E_DFmode:
3012 case E_DDmode:
3013 if (TARGET_DF_INSN)
3014 addr_mask |= RELOAD_REG_PRE_MODIFY;
3015 break;
3020 /* GPR and FPR registers can do REG+OFFSET addressing, except
3021 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3022 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3023 if ((addr_mask != 0) && !indexed_only_p
3024 && msize <= 8
3025 && (rc == RELOAD_REG_GPR
3026 || ((msize == 8 || m2 == SFmode)
3027 && (rc == RELOAD_REG_FPR
3028 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3029 addr_mask |= RELOAD_REG_OFFSET;
3031 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3032 instructions are enabled. The offset for 128-bit VSX registers is
3033 only 12 bits. While GPRs can handle the full offset range, VSX
3034 registers can only handle the restricted range. */
3035 else if ((addr_mask != 0) && !indexed_only_p
3036 && msize == 16 && TARGET_P9_VECTOR
3037 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3038 || (m2 == TImode && TARGET_VSX)))
3040 addr_mask |= RELOAD_REG_OFFSET;
3041 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3042 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3045 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3046 addressing on 128-bit types. */
3047 if (rc == RELOAD_REG_VMX && msize == 16
3048 && (addr_mask & RELOAD_REG_VALID) != 0)
3049 addr_mask |= RELOAD_REG_AND_M16;
3051 reg_addr[m].addr_mask[rc] = addr_mask;
3052 any_addr_mask |= addr_mask;
3055 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
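/* Worked example of the loop above (illustrative, assuming a power9-style
   configuration): for DFmode the GPR and FPR classes end up with
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET plus the
   pre-increment bits, while a 16-byte mode such as V2DFmode gets
   RELOAD_REG_MULTIPLE in GPRs (it needs two of them) but
   RELOAD_REG_VALID, RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and
   RELOAD_REG_QUAD_OFFSET in the FPR/VMX classes.  */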
3060 /* Initialize the various global tables that are based on register size. */
3061 static void
3062 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3064 ssize_t r, m, c;
3065 int align64;
3066 int align32;
3068 /* Precalculate REGNO_REG_CLASS. */
3069 rs6000_regno_regclass[0] = GENERAL_REGS;
3070 for (r = 1; r < 32; ++r)
3071 rs6000_regno_regclass[r] = BASE_REGS;
3073 for (r = 32; r < 64; ++r)
3074 rs6000_regno_regclass[r] = FLOAT_REGS;
3076 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3077 rs6000_regno_regclass[r] = NO_REGS;
3079 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3080 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3082 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3083 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3084 rs6000_regno_regclass[r] = CR_REGS;
3086 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3087 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3088 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3089 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3090 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3091 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3092 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3093 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3094 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3095 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3097 /* Precalculate register class to simpler reload register class. We don't
3098 need all of the register classes that are combinations of different
3099 classes, just the simple ones that have constraint letters. */
3100 for (c = 0; c < N_REG_CLASSES; c++)
3101 reg_class_to_reg_type[c] = NO_REG_TYPE;
3103 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3104 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3105 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3106 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3107 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3108 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3109 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3110 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3111 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3112 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3114 if (TARGET_VSX)
3116 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3117 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3119 else
3121 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3122 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3125 /* Precalculate the valid memory formats as well as the vector information;
3126 this must be set up before the rs6000_hard_regno_nregs_internal calls
3127 below. */
3128 gcc_assert ((int)VECTOR_NONE == 0);
3129 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3130 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3132 gcc_assert ((int)CODE_FOR_nothing == 0);
3133 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3135 gcc_assert ((int)NO_REGS == 0);
3136 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3138 /* The VSX hardware allows native alignment for vectors; control whether the
3139 compiler believes it can use native alignment or must still use 128-bit alignment. */
3140 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3142 align64 = 64;
3143 align32 = 32;
3145 else
3147 align64 = 128;
3148 align32 = 128;
3151 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3152 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3153 if (TARGET_FLOAT128_TYPE)
3155 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3156 rs6000_vector_align[KFmode] = 128;
3158 if (FLOAT128_IEEE_P (TFmode))
3160 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3161 rs6000_vector_align[TFmode] = 128;
3165 /* V2DF mode, VSX only. */
3166 if (TARGET_VSX)
3168 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3169 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3170 rs6000_vector_align[V2DFmode] = align64;
3173 /* V4SF mode, either VSX or Altivec. */
3174 if (TARGET_VSX)
3176 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3177 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3178 rs6000_vector_align[V4SFmode] = align32;
3180 else if (TARGET_ALTIVEC)
3182 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3183 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3184 rs6000_vector_align[V4SFmode] = align32;
3187 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3188 and stores. */
3189 if (TARGET_ALTIVEC)
3191 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3192 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3193 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3194 rs6000_vector_align[V4SImode] = align32;
3195 rs6000_vector_align[V8HImode] = align32;
3196 rs6000_vector_align[V16QImode] = align32;
3198 if (TARGET_VSX)
3200 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3201 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3202 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3204 else
3206 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3207 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3208 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3212 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3213 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3214 if (TARGET_VSX)
3216 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3217 rs6000_vector_unit[V2DImode]
3218 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3219 rs6000_vector_align[V2DImode] = align64;
3221 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3222 rs6000_vector_unit[V1TImode]
3223 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3224 rs6000_vector_align[V1TImode] = 128;
3227 /* DFmode, see if we want to use the VSX unit. Memory is handled
3228 differently, so don't set rs6000_vector_mem. */
3229 if (TARGET_VSX)
3231 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3232 rs6000_vector_align[DFmode] = 64;
3235 /* SFmode, see if we want to use the VSX unit. */
3236 if (TARGET_P8_VECTOR)
3238 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3239 rs6000_vector_align[SFmode] = 32;
3242 /* Allow TImode in VSX register and set the VSX memory macros. */
3243 if (TARGET_VSX)
3245 rs6000_vector_mem[TImode] = VECTOR_VSX;
3246 rs6000_vector_align[TImode] = align64;
3249 /* TODO add paired floating point vector support. */
3251 /* Register class settings for the constraints that depend on compile
3252 switches. When the VSX code was added, different constraints were added
3253 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3254 of the VSX registers are used. The register classes for scalar floating
3255 point types are set based on whether we allow that type into the upper
3256 (Altivec) registers. GCC has register classes to target the Altivec
3257 registers for load/store operations, to select using a VSX memory
3258 operation instead of the traditional floating point operation. The
3259 constraints are:
3261 d - Register class to use with traditional DFmode instructions.
3262 f - Register class to use with traditional SFmode instructions.
3263 v - Altivec register.
3264 wa - Any VSX register.
3265 wc - Reserved to represent individual CR bits (used in LLVM).
3266 wd - Preferred register class for V2DFmode.
3267 wf - Preferred register class for V4SFmode.
3268 wg - Float register for power6x move insns.
3269 wh - FP register for direct move instructions.
3270 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3271 wj - FP or VSX register to hold 64-bit integers for direct moves.
3272 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3273 wl - Float register if we can do 32-bit signed int loads.
3274 wm - VSX register for ISA 2.07 direct move operations.
3275 wn - always NO_REGS.
3276 wr - GPR if 64-bit mode is permitted.
3277 ws - Register class to do ISA 2.06 DF operations.
3278 wt - VSX register for TImode in VSX registers.
3279 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3280 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3281 ww - Register class to do SF conversions in with VSX operations.
3282 wx - Float register if we can do 32-bit int stores.
3283 wy - Register class to do ISA 2.07 SF operations.
3284 wz - Float register if we can do 32-bit unsigned int loads.
3285 wH - Altivec register if SImode is allowed in VSX registers.
3286 wI - VSX register if SImode is allowed in VSX registers.
3287 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3288 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
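/* Illustrative machine-description usage (not from this file): an insn
   pattern can pick up these dynamic classes through operand constraints,
   e.g.

	(define_insn "*example_movdf"
	  [(set (match_operand:DF 0 "vsx_register_operand" "=ws")
		(match_operand:DF 1 "vsx_register_operand" "ws"))]
	  ...)

   so the same pattern targets FLOAT_REGS or VSX_REGS depending on which
   class was stored in rs6000_constraints[RS6000_CONSTRAINT_ws] below.  */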
3290 if (TARGET_HARD_FLOAT)
3291 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3293 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3294 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3296 if (TARGET_VSX)
3298 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3299 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3300 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3301 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3302 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3303 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3304 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3307 /* Add conditional constraints based on various options, to allow us to
3308 collapse multiple insn patterns. */
3309 if (TARGET_ALTIVEC)
3310 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3312 if (TARGET_MFPGPR) /* DFmode */
3313 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3315 if (TARGET_LFIWAX)
3316 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3318 if (TARGET_DIRECT_MOVE)
3320 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3321 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3322 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3323 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3324 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3325 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3328 if (TARGET_POWERPC64)
3330 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3331 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3334 if (TARGET_P8_VECTOR) /* SFmode */
3336 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3337 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3338 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3340 else if (TARGET_VSX)
3341 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3343 if (TARGET_STFIWX)
3344 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3346 if (TARGET_LFIWZX)
3347 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3349 if (TARGET_FLOAT128_TYPE)
3351 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3352 if (FLOAT128_IEEE_P (TFmode))
3353 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3356 if (TARGET_P9_VECTOR)
3358 /* Support for new D-form instructions. */
3359 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3361 /* Support for ISA 3.0 (power9) vectors. */
3362 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3365 /* Support for new direct moves (ISA 3.0 + 64bit). */
3366 if (TARGET_DIRECT_MOVE_128)
3367 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3369 /* Support small integers in VSX registers. */
3370 if (TARGET_P8_VECTOR)
3372 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3373 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3374 if (TARGET_P9_VECTOR)
3376 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3377 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3381 /* Set up the reload helper and direct move functions. */
3382 if (TARGET_VSX || TARGET_ALTIVEC)
3384 if (TARGET_64BIT)
3386 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3387 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3388 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3389 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3390 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3391 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3392 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3393 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3394 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3395 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3396 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3397 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3398 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3399 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3400 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3401 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3402 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3403 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3404 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3405 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3407 if (FLOAT128_VECTOR_P (KFmode))
3409 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3410 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3413 if (FLOAT128_VECTOR_P (TFmode))
3415 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3416 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3419 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3420 available; TARGET_NO_SDMODE_STACK is only set when both exist. */
3421 if (TARGET_NO_SDMODE_STACK)
3423 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3424 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3427 if (TARGET_VSX)
3429 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3430 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3433 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3435 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3436 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3437 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3438 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3439 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3440 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3441 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3442 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3443 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3445 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3446 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3447 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3448 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3449 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3450 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3451 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3452 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3453 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3455 if (FLOAT128_VECTOR_P (KFmode))
3457 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3458 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3461 if (FLOAT128_VECTOR_P (TFmode))
3463 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3464 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3468 else
3470 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3471 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3472 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3473 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3474 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3475 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3476 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3477 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3478 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3479 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3480 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3481 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3482 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3483 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3484 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3485 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3486 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3487 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3488 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3489 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3491 if (FLOAT128_VECTOR_P (KFmode))
3493 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3494 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3497 if (FLOAT128_IEEE_P (TFmode))
3499 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3500 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3503 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3504 available; TARGET_NO_SDMODE_STACK is only set when both exist. */
3505 if (TARGET_NO_SDMODE_STACK)
3507 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3508 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3511 if (TARGET_VSX)
3513 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3514 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3517 if (TARGET_DIRECT_MOVE)
3519 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3520 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3521 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3525 reg_addr[DFmode].scalar_in_vmx_p = true;
3526 reg_addr[DImode].scalar_in_vmx_p = true;
3528 if (TARGET_P8_VECTOR)
3530 reg_addr[SFmode].scalar_in_vmx_p = true;
3531 reg_addr[SImode].scalar_in_vmx_p = true;
3533 if (TARGET_P9_VECTOR)
3535 reg_addr[HImode].scalar_in_vmx_p = true;
3536 reg_addr[QImode].scalar_in_vmx_p = true;
3541 /* Setup the fusion operations. */
3542 if (TARGET_P8_FUSION)
3544 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3545 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3546 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3547 if (TARGET_64BIT)
3548 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
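/* What these fusion_gpr_ld entries enable, roughly: power8 can fuse an
   addis with a dependent load when the pair stays adjacent, e.g.
   (sequence illustrative)

       addis 9,2,.LC0@toc@ha
       lwz   9,.LC0@toc@l(9)

   and the peephole2 patterns that consume these CODE_FOR_ values keep
   such pairs together.  */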
3551 if (TARGET_P9_FUSION)
3553 struct fuse_insns {
3554 enum machine_mode mode; /* Mode of the fused type. */
3555 enum machine_mode pmode; /* Pointer mode. */
3556 enum rs6000_reload_reg_type rtype; /* Register type. */
3557 enum insn_code load; /* Load insn. */
3558 enum insn_code store; /* Store insn. */
3561 static const struct fuse_insns addis_insns[] = {
3562 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3563 CODE_FOR_fusion_vsx_di_sf_load,
3564 CODE_FOR_fusion_vsx_di_sf_store },
3566 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3567 CODE_FOR_fusion_vsx_si_sf_load,
3568 CODE_FOR_fusion_vsx_si_sf_store },
3570 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3571 CODE_FOR_fusion_vsx_di_df_load,
3572 CODE_FOR_fusion_vsx_di_df_store },
3574 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3575 CODE_FOR_fusion_vsx_si_df_load,
3576 CODE_FOR_fusion_vsx_si_df_store },
3578 { E_DImode, E_DImode, RELOAD_REG_FPR,
3579 CODE_FOR_fusion_vsx_di_di_load,
3580 CODE_FOR_fusion_vsx_di_di_store },
3582 { E_DImode, E_SImode, RELOAD_REG_FPR,
3583 CODE_FOR_fusion_vsx_si_di_load,
3584 CODE_FOR_fusion_vsx_si_di_store },
3586 { E_QImode, E_DImode, RELOAD_REG_GPR,
3587 CODE_FOR_fusion_gpr_di_qi_load,
3588 CODE_FOR_fusion_gpr_di_qi_store },
3590 { E_QImode, E_SImode, RELOAD_REG_GPR,
3591 CODE_FOR_fusion_gpr_si_qi_load,
3592 CODE_FOR_fusion_gpr_si_qi_store },
3594 { E_HImode, E_DImode, RELOAD_REG_GPR,
3595 CODE_FOR_fusion_gpr_di_hi_load,
3596 CODE_FOR_fusion_gpr_di_hi_store },
3598 { E_HImode, E_SImode, RELOAD_REG_GPR,
3599 CODE_FOR_fusion_gpr_si_hi_load,
3600 CODE_FOR_fusion_gpr_si_hi_store },
3602 { E_SImode, E_DImode, RELOAD_REG_GPR,
3603 CODE_FOR_fusion_gpr_di_si_load,
3604 CODE_FOR_fusion_gpr_di_si_store },
3606 { E_SImode, E_SImode, RELOAD_REG_GPR,
3607 CODE_FOR_fusion_gpr_si_si_load,
3608 CODE_FOR_fusion_gpr_si_si_store },
3610 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3611 CODE_FOR_fusion_gpr_di_sf_load,
3612 CODE_FOR_fusion_gpr_di_sf_store },
3614 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3615 CODE_FOR_fusion_gpr_si_sf_load,
3616 CODE_FOR_fusion_gpr_si_sf_store },
3618 { E_DImode, E_DImode, RELOAD_REG_GPR,
3619 CODE_FOR_fusion_gpr_di_di_load,
3620 CODE_FOR_fusion_gpr_di_di_store },
3622 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3623 CODE_FOR_fusion_gpr_di_df_load,
3624 CODE_FOR_fusion_gpr_di_df_store },
3627 machine_mode cur_pmode = Pmode;
3628 size_t i;
3630 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3632 machine_mode xmode = addis_insns[i].mode;
3633 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3635 if (addis_insns[i].pmode != cur_pmode)
3636 continue;
3638 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3639 continue;
3641 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3642 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3644 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3646 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3647 = addis_insns[i].load;
3648 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3649 = addis_insns[i].store;
3654 /* Note which types we support for fusing a TOC setup plus a memory insn.
3655 We only do fused TOCs for medium/large code models. */
3656 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3657 && (TARGET_CMODEL != CMODEL_SMALL))
3659 reg_addr[QImode].fused_toc = true;
3660 reg_addr[HImode].fused_toc = true;
3661 reg_addr[SImode].fused_toc = true;
3662 reg_addr[DImode].fused_toc = true;
3663 if (TARGET_HARD_FLOAT)
3665 if (TARGET_SINGLE_FLOAT)
3666 reg_addr[SFmode].fused_toc = true;
3667 if (TARGET_DOUBLE_FLOAT)
3668 reg_addr[DFmode].fused_toc = true;
3672 /* Precalculate HARD_REGNO_NREGS. */
3673 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3674 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3675 rs6000_hard_regno_nregs[m][r]
3676 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3678 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3679 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3680 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3681 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3682 rs6000_hard_regno_mode_ok_p[m][r] = true;
3684 /* Precalculate CLASS_MAX_NREGS sizes. */
3685 for (c = 0; c < LIM_REG_CLASSES; ++c)
3687 int reg_size;
3689 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3690 reg_size = UNITS_PER_VSX_WORD;
3692 else if (c == ALTIVEC_REGS)
3693 reg_size = UNITS_PER_ALTIVEC_WORD;
3695 else if (c == FLOAT_REGS)
3696 reg_size = UNITS_PER_FP_WORD;
3698 else
3699 reg_size = UNITS_PER_WORD;
3701 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3703 machine_mode m2 = (machine_mode)m;
3704 int reg_size2 = reg_size;
3706 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3707 in VSX. */
3708 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3709 reg_size2 = UNITS_PER_FP_WORD;
3711 rs6000_class_max_nregs[m][c]
3712 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
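/* Worked example of the rounding above: V2DFmode is 16 bytes, so with
   TARGET_VSX it fits in one 16-byte VSX register ((16 + 16 - 1) / 16 == 1),
   while a FLOAT128_2REG_P mode such as IBM long double is forced back to
   reg_size2 == 8 and thus needs (16 + 8 - 1) / 8 == 2 registers.  */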
3716 /* Calculate the modes for which to automatically generate code using the
3717 reciprocal divide and square root estimate instructions. In the future,
3718 possibly generate the instructions automatically even if the user did not
3719 specify -mrecip. The double precision reciprocal sqrt estimate on older
3720 machines is not accurate enough. */
3721 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3722 if (TARGET_FRES)
3723 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3724 if (TARGET_FRE)
3725 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3726 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3727 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3728 if (VECTOR_UNIT_VSX_P (V2DFmode))
3729 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3731 if (TARGET_FRSQRTES)
3732 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3733 if (TARGET_FRSQRTE)
3734 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3735 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3736 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3737 if (VECTOR_UNIT_VSX_P (V2DFmode))
3738 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3740 if (rs6000_recip_control)
3742 if (!flag_finite_math_only)
3743 warning (0, "%qs requires %qs or %qs", "-mrecip",
3744 "-ffinite-math-only", "-ffast-math");
3745 if (flag_trapping_math)
3746 warning (0, "%qs requires %qs or %qs", "-mrecip",
3747 "-fno-trapping-math", "-ffast-math");
3748 if (!flag_reciprocal_math)
3749 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3750 "-ffast-math");
3751 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3753 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3754 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3755 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3757 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3758 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3759 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3761 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3762 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3763 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3765 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3766 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3767 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3769 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3770 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3771 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3773 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3774 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3775 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3777 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3778 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3779 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3781 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3782 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3783 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
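/* A minimal C model of the software divide that the AUTO_RE bits above
   authorize (the real expansion is emitted as RTL elsewhere in this file;
   "fre" stands for the hardware reciprocal estimate instruction, and the
   number of refinement steps depends on the estimate's accuracy):

       double x = fre (d);           /* low-precision estimate of 1/d  */
       x = x * (2.0 - d * x);        /* Newton-Raphson refinement  */
       x = x * (2.0 - d * x);        /* repeat to reach target precision  */
       return n * x;                 /* n/d without a divide instruction  */
*/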
3787 /* Update the addr mask bits in reg_addr to help the secondary reload and
3788 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing
3789 to use. */
3790 rs6000_setup_reg_addr_masks ();
3792 if (global_init_p || TARGET_DEBUG_TARGET)
3794 if (TARGET_DEBUG_REG)
3795 rs6000_debug_reg_global ();
3797 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3798 fprintf (stderr,
3799 "SImode variable mult cost = %d\n"
3800 "SImode constant mult cost = %d\n"
3801 "SImode short constant mult cost = %d\n"
3802 "DImode multiplication cost = %d\n"
3803 "SImode division cost = %d\n"
3804 "DImode division cost = %d\n"
3805 "Simple fp operation cost = %d\n"
3806 "DFmode multiplication cost = %d\n"
3807 "SFmode division cost = %d\n"
3808 "DFmode division cost = %d\n"
3809 "cache line size = %d\n"
3810 "l1 cache size = %d\n"
3811 "l2 cache size = %d\n"
3812 "simultaneous prefetches = %d\n"
3813 "\n",
3814 rs6000_cost->mulsi,
3815 rs6000_cost->mulsi_const,
3816 rs6000_cost->mulsi_const9,
3817 rs6000_cost->muldi,
3818 rs6000_cost->divsi,
3819 rs6000_cost->divdi,
3820 rs6000_cost->fp,
3821 rs6000_cost->dmul,
3822 rs6000_cost->sdiv,
3823 rs6000_cost->ddiv,
3824 rs6000_cost->cache_line_size,
3825 rs6000_cost->l1_cache_size,
3826 rs6000_cost->l2_cache_size,
3827 rs6000_cost->simultaneous_prefetches);
3831 #if TARGET_MACHO
3832 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3834 static void
3835 darwin_rs6000_override_options (void)
3837 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3838 off. */
3839 rs6000_altivec_abi = 1;
3840 TARGET_ALTIVEC_VRSAVE = 1;
3841 rs6000_current_abi = ABI_DARWIN;
3843 if (DEFAULT_ABI == ABI_DARWIN
3844 && TARGET_64BIT)
3845 darwin_one_byte_bool = 1;
3847 if (TARGET_64BIT && ! TARGET_POWERPC64)
3849 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3850 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3852 if (flag_mkernel)
3854 rs6000_default_long_calls = 1;
3855 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3858 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3859 Altivec. */
3860 if (!flag_mkernel && !flag_apple_kext
3861 && TARGET_64BIT
3862 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3863 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3865 /* Unless the user (not the configurer) has explicitly overridden
3866 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3867 G4 unless targeting the kernel. */
3868 if (!flag_mkernel
3869 && !flag_apple_kext
3870 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3871 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3872 && ! global_options_set.x_rs6000_cpu_index)
3874 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3877 #endif
3879 /* If not otherwise specified by a target, make 'long double' equivalent to
3880 'double'. */
3882 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3883 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3884 #endif
3886 /* Return the builtin mask of the various options used, which can affect
3887 which builtins are available. In the past we used target_flags, but we've run out of
3888 bits, and some options like PAIRED are no longer in target_flags. */
3890 HOST_WIDE_INT
3891 rs6000_builtin_mask_calculate (void)
3893 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3894 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3895 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3896 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3897 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3898 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3899 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3900 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3901 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3902 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3903 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3904 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3905 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3906 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3907 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3908 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3909 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3910 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3911 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3912 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3913 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3914 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
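/* A sketch (illustrative, not the real builtin machinery) of how this
   mask gates expansion: each builtin records the RS6000_BTM_* bits it
   needs, and it is usable only if every one of those bits is present:

       HOST_WIDE_INT need = RS6000_BTM_ALTIVEC | RS6000_BTM_P8_VECTOR;
       bool ok = (rs6000_builtin_mask_calculate () & need) == need;
*/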
3917 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3918 to clobber the XER[CA] bit because clobbering that bit without telling
3919 the compiler worked just fine with versions of GCC before GCC 5, and
3920 breaking a lot of older code in ways that are hard to track down is
3921 not such a great idea. */
3923 static rtx_insn *
3924 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3925 vec<const char *> &/*constraints*/,
3926 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3928 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3929 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3930 return NULL;
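/* An example of the class of code this protects (asm body illustrative):
   inline asm that uses the carry bit without declaring it, such as

       asm ("addic %0,%1,-1\n\taddme %0,%0" : "=r" (out) : "r" (in));

   worked before GCC 5 only because XER[CA] happened never to be live
   across an asm statement; always clobbering CA_REGNO keeps such code
   working without source changes.  */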
3933 /* Override command line options.
3935 Combine build-specific configuration information with options
3936 specified on the command line to set various state variables which
3937 influence code generation, optimization, and expansion of built-in
3938 functions. Assure that command-line configuration preferences are
3939 compatible with each other and with the build configuration; issue
3940 warnings while adjusting configuration or error messages while
3941 rejecting configuration.
3943 Upon entry to this function:
3945 This function is called once at the beginning of
3946 compilation, and then again at the start and end of compiling
3947 each section of code that has a different configuration, as
3948 indicated, for example, by adding the
3950 __attribute__((__target__("cpu=power9")))
3952 qualifier to a function definition or, for example, by bracketing
3953 code between
3955 #pragma GCC target("altivec")
3957 and
3959 #pragma GCC reset_options
3961 directives. Parameter global_init_p is true for the initial
3962 invocation, which initializes global variables, and false for all
3963 subsequent invocations.
3966 Various global state information is assumed to be valid. This
3967 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3968 default CPU specified at build configure time, TARGET_DEFAULT,
3969 representing the default set of option flags for the default
3970 target, and global_options_set.x_rs6000_isa_flags, representing
3971 which options were requested on the command line.
3973 Upon return from this function:
3975 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3976 was set by name on the command line. Additionally, if certain
3977 attributes are automatically enabled or disabled by this function
3978 in order to assure compatibility between options and
3979 configuration, the flags associated with those attributes are
3980 also set. By setting these "explicit bits", we avoid the risk
3981 that other code might accidentally overwrite these particular
3982 attributes with "default values".
3984 The various bits of rs6000_isa_flags are set to indicate the
3985 target options that have been selected for the most current
3986 compilation efforts. This has the effect of also turning on the
3987 associated TARGET_XXX values since these are macros which are
3988 generally defined to test the corresponding bit of the
3989 rs6000_isa_flags variable.
3991 The variable rs6000_builtin_mask is set to represent the target
3992 options for the most current compilation efforts, consistent with
3993 the current contents of rs6000_isa_flags. This variable controls
3994 expansion of built-in functions.
3996 Various other global variables and fields of global structures
3997 (over 50 in all) are initialized to reflect the desired options
3998 for the most current compilation efforts. */
4000 static bool
4001 rs6000_option_override_internal (bool global_init_p)
4003 bool ret = true;
4005 HOST_WIDE_INT set_masks;
4006 HOST_WIDE_INT ignore_masks;
4007 int cpu_index = -1;
4008 int tune_index;
4009 struct cl_target_option *main_target_opt
4010 = ((global_init_p || target_option_default_node == NULL)
4011 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4013 /* Print defaults. */
4014 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4015 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4017 /* Remember the explicit arguments. */
4018 if (global_init_p)
4019 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4021 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4022 library functions, so warn about it. The flag may be useful for
4023 performance studies from time to time though, so don't disable it
4024 entirely. */
4025 if (global_options_set.x_rs6000_alignment_flags
4026 && rs6000_alignment_flags == MASK_ALIGN_POWER
4027 && DEFAULT_ABI == ABI_DARWIN
4028 && TARGET_64BIT)
4029 warning (0, "%qs is not supported for 64-bit Darwin;"
4030 " it is incompatible with the installed C and C++ libraries",
4031 "-malign-power");
4033 /* Numerous experiments show that IRA based loop pressure
4034 calculation works better for RTL loop invariant motion on targets
4035 with enough (>= 32) registers. It is an expensive optimization,
4036 so it is only enabled when optimizing for peak performance. */
4037 if (optimize >= 3 && global_init_p
4038 && !global_options_set.x_flag_ira_loop_pressure)
4039 flag_ira_loop_pressure = 1;
4041 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4042 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4043 options were already specified. */
4044 if (flag_sanitize & SANITIZE_USER_ADDRESS
4045 && !global_options_set.x_flag_asynchronous_unwind_tables)
4046 flag_asynchronous_unwind_tables = 1;
4048 /* Set the pointer size. */
4049 if (TARGET_64BIT)
4051 rs6000_pmode = DImode;
4052 rs6000_pointer_size = 64;
4054 else
4056 rs6000_pmode = SImode;
4057 rs6000_pointer_size = 32;
4060 /* Some OSs don't support saving the high part of 64-bit registers on context
4061 switch. Other OSs don't support saving Altivec registers. On those OSs,
4062 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4063 if the user wants either, the user must explicitly specify them and we
4064 won't interfere with the user's specification. */
4066 set_masks = POWERPC_MASKS;
4067 #ifdef OS_MISSING_POWERPC64
4068 if (OS_MISSING_POWERPC64)
4069 set_masks &= ~OPTION_MASK_POWERPC64;
4070 #endif
4071 #ifdef OS_MISSING_ALTIVEC
4072 if (OS_MISSING_ALTIVEC)
4073 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4074 | OTHER_VSX_VECTOR_MASKS);
4075 #endif
4077 /* Don't let the processor default override flags given explicitly. */
4078 set_masks &= ~rs6000_isa_flags_explicit;
4080 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4081 the cpu in a target attribute or pragma, but did not specify a tuning
4082 option, use the cpu for the tuning option rather than the option specified
4083 with -mtune on the command line. Process a '--with-cpu' configuration
4084 request as an implicit --cpu. */
4085 if (rs6000_cpu_index >= 0)
4086 cpu_index = rs6000_cpu_index;
4087 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4088 cpu_index = main_target_opt->x_rs6000_cpu_index;
4089 else if (OPTION_TARGET_CPU_DEFAULT)
4090 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4092 if (cpu_index >= 0)
4094 const char *unavailable_cpu = NULL;
4095 switch (processor_target_table[cpu_index].processor)
4097 #ifndef HAVE_AS_POWER9
4098 case PROCESSOR_POWER9:
4099 unavailable_cpu = "power9";
4100 break;
4101 #endif
4102 #ifndef HAVE_AS_POWER8
4103 case PROCESSOR_POWER8:
4104 unavailable_cpu = "power8";
4105 break;
4106 #endif
4107 #ifndef HAVE_AS_POPCNTD
4108 case PROCESSOR_POWER7:
4109 unavailable_cpu = "power7";
4110 break;
4111 #endif
4112 #ifndef HAVE_AS_DFP
4113 case PROCESSOR_POWER6:
4114 unavailable_cpu = "power6";
4115 break;
4116 #endif
4117 #ifndef HAVE_AS_POPCNTB
4118 case PROCESSOR_POWER5:
4119 unavailable_cpu = "power5";
4120 break;
4121 #endif
4122 default:
4123 break;
4125 if (unavailable_cpu)
4127 cpu_index = -1;
4128 warning (0, "will not generate %qs instructions because "
4129 "assembler lacks %qs support", unavailable_cpu,
4130 unavailable_cpu);
4134 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4135 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4136 with those from the cpu, except for options that were explicitly set. If
4137 we don't have a cpu, do not override the target bits set in
4138 TARGET_DEFAULT. */
4139 if (cpu_index >= 0)
4141 rs6000_cpu_index = cpu_index;
4142 rs6000_isa_flags &= ~set_masks;
4143 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4144 & set_masks);
4146 else
4148 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4149 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4150 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4151 to using rs6000_isa_flags, we need to do the initialization here.
4153 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4154 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4155 HOST_WIDE_INT flags;
4156 if (TARGET_DEFAULT)
4157 flags = TARGET_DEFAULT;
4158 else
4160 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4161 const char *default_cpu = (!TARGET_POWERPC64
4162 ? "powerpc"
4163 : (BYTES_BIG_ENDIAN
4164 ? "powerpc64"
4165 : "powerpc64le"));
4166 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4167 flags = processor_target_table[default_cpu_index].target_enable;
4169 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4172 if (rs6000_tune_index >= 0)
4173 tune_index = rs6000_tune_index;
4174 else if (cpu_index >= 0)
4175 rs6000_tune_index = tune_index = cpu_index;
4176 else
4178 size_t i;
4179 enum processor_type tune_proc
4180 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4182 tune_index = -1;
4183 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4184 if (processor_target_table[i].processor == tune_proc)
4186 tune_index = i;
4187 break;
4191 if (cpu_index >= 0)
4192 rs6000_cpu = processor_target_table[cpu_index].processor;
4193 else
4194 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
4196 gcc_assert (tune_index >= 0);
4197 rs6000_tune = processor_target_table[tune_index].processor;
4199 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4200 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4201 || rs6000_cpu == PROCESSOR_PPCE5500)
4203 if (TARGET_ALTIVEC)
4204 error ("AltiVec not supported in this target");
4207 /* If we are optimizing big endian systems for space, use the load/store
4208 multiple instructions. */
4209 if (BYTES_BIG_ENDIAN && optimize_size)
4210 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
4212 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
4213 because the hardware doesn't support the instructions used in little
4214 endian mode, and using them causes an alignment trap. The 750 does not
4215 cause an alignment trap (except when the target is unaligned). */
4217 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
4219 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4220 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4221 warning (0, "%qs is not supported on little endian systems",
4222 "-mmultiple");
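/* The instructions in question are the load/store multiple and string
   forms, e.g. (illustrative) an epilogue restore such as

       lmw 25,-28(1)

   which reloads r25..r31 in one instruction; in little endian mode these
   forms take an alignment interrupt instead of executing.  */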
4225 /* If little-endian, default to -mstrict-align on older processors.
4226 Testing for htm matches power8 and later. */
4227 if (!BYTES_BIG_ENDIAN
4228 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4229 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4231 /* -maltivec={le,be} implies -maltivec. */
4232 if (rs6000_altivec_element_order != 0)
4233 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4235 /* Disallow -maltivec=le in big endian mode for now. This is not
4236 known to be useful for anyone. */
4237 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4239 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4240 rs6000_altivec_element_order = 0;
4243 if (!rs6000_fold_gimple)
4244 fprintf (stderr,
4245 "gimple folding of rs6000 builtins has been disabled.\n");
4247 /* Add some warnings for VSX. */
4248 if (TARGET_VSX)
4250 const char *msg = NULL;
4251 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4253 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4254 msg = N_("-mvsx requires hardware floating point");
4255 else
4257 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4258 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4261 else if (TARGET_PAIRED_FLOAT)
4262 msg = N_("-mvsx and -mpaired are incompatible");
4263 else if (TARGET_AVOID_XFORM > 0)
4264 msg = N_("-mvsx needs indexed addressing");
4265 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4266 & OPTION_MASK_ALTIVEC))
4268 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4269 msg = N_("-mvsx and -mno-altivec are incompatible");
4270 else
4271 msg = N_("-mno-altivec disables vsx");
4274 if (msg)
4276 warning (0, msg);
4277 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4278 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4282 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4283 the -mcpu setting to enable options that conflict. */
4284 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4285 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4286 | OPTION_MASK_ALTIVEC
4287 | OPTION_MASK_VSX)) != 0)
4288 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4289 | OPTION_MASK_DIRECT_MOVE)
4290 & ~rs6000_isa_flags_explicit);
4292 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4293 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4295 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4296 off all of the options that depend on those flags. */
4297 ignore_masks = rs6000_disable_incompatible_switches ();
4299 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4300 unless the user explicitly used the -mno-<option> to disable the code. */
4301 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4302 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4303 else if (TARGET_P9_MINMAX)
4305 if (cpu_index >= 0)
4307 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4309 /* legacy behavior: allow -mcpu=power9 with certain
4310 capabilities explicitly disabled. */
4311 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4313 else
4314 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4315 "for <xxx> less than power9", "-mcpu");
4317 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4318 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4319 & rs6000_isa_flags_explicit))
4320 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4321 were explicitly cleared. */
4322 error ("%qs incompatible with explicitly disabled options",
4323 "-mpower9-minmax");
4324 else
4325 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4327 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4328 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4329 else if (TARGET_VSX)
4330 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4331 else if (TARGET_POPCNTD)
4332 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4333 else if (TARGET_DFP)
4334 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4335 else if (TARGET_CMPB)
4336 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4337 else if (TARGET_FPRND)
4338 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4339 else if (TARGET_POPCNTB)
4340 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4341 else if (TARGET_ALTIVEC)
4342 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4344 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4346 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4347 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4348 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4351 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4353 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4354 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4355 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4358 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4360 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4361 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4362 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4365 if (TARGET_P8_VECTOR && !TARGET_VSX)
4367 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4368 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4369 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4370 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4372 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4373 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4374 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4376 else
4378 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4379 not explicit. */
4380 rs6000_isa_flags |= OPTION_MASK_VSX;
4381 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4385 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4387 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4388 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4389 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4392 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4393 silently turn off quad memory mode. */
4394 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4396 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4397 warning (0, N_("-mquad-memory requires 64-bit mode"));
4399 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4400 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4402 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4403 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4406 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4407 the words are reversed, but atomic operations can still be done by
4408 swapping the words. */
4409 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4411 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4412 warning (0, N_("-mquad-memory is not available in little endian "
4413 "mode"));
4415 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4418 /* Assume that if the user asked for normal quad memory instructions, they
4419 want the atomic versions as well, unless they explicitly told us not to
4420 use quad word atomic instructions. */
4421 if (TARGET_QUAD_MEMORY
4422 && !TARGET_QUAD_MEMORY_ATOMIC
4423 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4424 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4426 /* If we can shrink-wrap the TOC register save separately, then use
4427 -msave-toc-indirect unless explicitly disabled. */
4428 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4429 && flag_shrink_wrap_separate
4430 && optimize_function_for_speed_p (cfun))
4431 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4433 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4434 generating power8 instructions. */
4435 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4436 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4437 & OPTION_MASK_P8_FUSION);
4439 /* Setting additional fusion flags turns on base fusion. */
4440 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4442 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4444 if (TARGET_P8_FUSION_SIGN)
4445 error ("%qs requires %qs", "-mpower8-fusion-sign",
4446 "-mpower8-fusion");
4448 if (TARGET_TOC_FUSION)
4449 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4451 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4453 else
4454 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4457 /* Power9 fusion is a superset of power8 fusion. */
4458 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4460 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4462 /* We prefer to not mention undocumented options in
4463 error messages. However, if users have managed to select
4464 power9-fusion without selecting power8-fusion, they
4465 already know about undocumented flags. */
4466 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4467 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4469 else
4470 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4473 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4474 generating power9 instructions. */
4475 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4476 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4477 & OPTION_MASK_P9_FUSION);
4479 /* Power8 does not fuse sign extended loads with the addis. If we are
4480 optimizing at high levels for speed, convert a sign extended load into a
4481 zero extending load, and an explicit sign extension. */
4482 if (TARGET_P8_FUSION
4483 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4484 && optimize_function_for_speed_p (cfun)
4485 && optimize >= 3)
4486 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
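/* Concretely (sequence illustrative): instead of a single lha, which
   power8 will not fuse, emit

       addis 9,2,sym@toc@ha
       lhz   9,sym@toc@l(9)    # zero-extending load fuses with the addis
       extsh 9,9               # explicit sign extension

   trading one extra instruction for the fusion win.  */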
4488 /* TOC fusion requires 64-bit and medium/large code model. */
4489 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4491 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4492 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4493 warning (0, N_("-mtoc-fusion requires 64-bit"));
4496 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4498 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4499 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4500 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4503 /* Turn on -mtoc-fusion by default if we have p8-fusion and a 64-bit
4504 medium/large code model. */
4505 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4506 && (TARGET_CMODEL != CMODEL_SMALL)
4507 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4508 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4510 /* ISA 3.0 vector instructions include ISA 2.07. */
4511 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4513 /* We prefer to not mention undocumented options in
4514 error messages. However, if users have managed to select
4515 power9-vector without selecting power8-vector, they
4516 already know about undocumented flags. */
4517 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4518 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4519 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4520 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4522 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4523 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4524 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4526 else
4528 /* OPTION_MASK_P9_VECTOR is explicit and
4529 OPTION_MASK_P8_VECTOR is not explicit. */
4530 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4531 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4535 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4536 support. If we only have ISA 2.06 support, and the user did not specify
4537 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4538 but we don't enable the full vectorization support. */
4539 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4540 TARGET_ALLOW_MOVMISALIGN = 1;
4542 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4544 if (TARGET_ALLOW_MOVMISALIGN > 0
4545 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4546 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4548 TARGET_ALLOW_MOVMISALIGN = 0;
4551 /* Determine when unaligned vector accesses are permitted, and when
4552 they are preferred over masked Altivec loads. Note that if
4553 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4554 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4555 not true. */
4556 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4558 if (!TARGET_VSX)
4560 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4561 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4563 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4566 else if (!TARGET_ALLOW_MOVMISALIGN)
4568 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4569 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4570 "-mallow-movmisalign");
4572 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4576 /* Set long double size before the IEEE 128-bit tests. */
4577 if (!global_options_set.x_rs6000_long_double_type_size)
4579 if (main_target_opt != NULL
4580 && (main_target_opt->x_rs6000_long_double_type_size
4581 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4582 error ("target attribute or pragma changes long double size");
4583 else
4584 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4587 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4588 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4589 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4590 those systems will not pick up this default. Warn if the user changes the
4591 default unless either the user used the -Wno-psabi option, or the compiler
4592 was built to enable multilibs to switch between the two long double
4593 types. */
4594 if (!global_options_set.x_rs6000_ieeequad)
4595 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4597 else if (!TARGET_IEEEQUAD_MULTILIB
4598 && rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT
4599 && TARGET_LONG_DOUBLE_128)
4601 static bool warned_change_long_double;
4602 if (!warned_change_long_double)
4604 warned_change_long_double = true;
4605 if (TARGET_IEEEQUAD)
4606 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4607 else
4608 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4612 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4613 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4614 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4615 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4616 the keyword as well as the type. */
4617 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4619 /* IEEE 128-bit floating point requires VSX support. */
4620 if (TARGET_FLOAT128_KEYWORD)
4622 if (!TARGET_VSX)
4624 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4625 error ("%qs requires VSX support", "-mfloat128");
4627 TARGET_FLOAT128_TYPE = 0;
4628 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4629 | OPTION_MASK_FLOAT128_HW);
4631 else if (!TARGET_FLOAT128_TYPE)
4633 TARGET_FLOAT128_TYPE = 1;
4634 warning (0, "The -mfloat128 option may not be fully supported");
4638 /* Enable the __float128 keyword under Linux by default. */
4639 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4640 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4641 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4643 /* If we are supporting the float128 type and have full ISA 3.0 support,
4644 enable -mfloat128-hardware by default. However, don't enable the
4645 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4646 because sometimes the compiler wants to put things in an integer
4647 container, and if we don't have __int128 support, it is impossible. */
4648 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4649 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4650 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4651 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
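/* With the type and keyword enabled, user code such as (illustrative)

       __float128 q = x;
       q = q / 3.0q;

   is carried in KFmode and compiles to the ISA 3.0 quad-precision
   hardware instructions (e.g. xsdivqp) under -mfloat128-hardware, or to
   libgcc software routines otherwise.  */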
4653 if (TARGET_FLOAT128_HW
4654 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4656 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4657 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4659 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4662 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4664 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4665 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4667 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4670 /* Print the options after updating the defaults. */
4671 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4672 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4674 /* E500mc does "better" if we inline more aggressively. Respect the
4675 user's opinion, though. */
4676 if (rs6000_block_move_inline_limit == 0
4677 && (rs6000_tune == PROCESSOR_PPCE500MC
4678 || rs6000_tune == PROCESSOR_PPCE500MC64
4679 || rs6000_tune == PROCESSOR_PPCE5500
4680 || rs6000_tune == PROCESSOR_PPCE6500))
4681 rs6000_block_move_inline_limit = 128;
4683 /* store_one_arg depends on expand_block_move to handle at least the
4684 size of reg_parm_stack_space. */
4685 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4686 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4688 if (global_init_p)
4690 /* If the appropriate debug option is enabled, replace the target hooks
4691 with debug versions that call the real version and then prints
4692 debugging information. */
4693 if (TARGET_DEBUG_COST)
4695 targetm.rtx_costs = rs6000_debug_rtx_costs;
4696 targetm.address_cost = rs6000_debug_address_cost;
4697 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4700 if (TARGET_DEBUG_ADDR)
4702 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4703 targetm.legitimize_address = rs6000_debug_legitimize_address;
4704 rs6000_secondary_reload_class_ptr
4705 = rs6000_debug_secondary_reload_class;
4706 targetm.secondary_memory_needed
4707 = rs6000_debug_secondary_memory_needed;
4708 targetm.can_change_mode_class
4709 = rs6000_debug_can_change_mode_class;
4710 rs6000_preferred_reload_class_ptr
4711 = rs6000_debug_preferred_reload_class;
4712 rs6000_legitimize_reload_address_ptr
4713 = rs6000_debug_legitimize_reload_address;
4714 rs6000_mode_dependent_address_ptr
4715 = rs6000_debug_mode_dependent_address;
4718 if (rs6000_veclibabi_name)
4720 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4721 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4722 else
4724 error ("unknown vectorization library ABI type (%qs) for "
4725 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4726 ret = false;
4731 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4732 target attribute or pragma which automatically enables both options,
4733 unless the altivec ABI was set. This is set by default for 64-bit, but
4734 not for 32-bit. */
4735 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4737 TARGET_FLOAT128_TYPE = 0;
4738 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4739 | OPTION_MASK_FLOAT128_KEYWORD)
4740 & ~rs6000_isa_flags_explicit);
4743 /* Enable Altivec ABI for AIX -maltivec. */
4744 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4746 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4747 error ("target attribute or pragma changes AltiVec ABI");
4748 else
4749 rs6000_altivec_abi = 1;
4752 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4753 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4754 be explicitly overridden in either case. */
4755 if (TARGET_ELF)
4757 if (!global_options_set.x_rs6000_altivec_abi
4758 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4760 if (main_target_opt != NULL
4761 && !main_target_opt->x_rs6000_altivec_abi)
4762 error ("target attribute or pragma changes AltiVec ABI");
4763 else
4764 rs6000_altivec_abi = 1;
4768 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4769 So far, the only darwin64 targets are also MACH-O. */
4770 if (TARGET_MACHO
4771 && DEFAULT_ABI == ABI_DARWIN
4772 && TARGET_64BIT)
4774 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4775 error ("target attribute or pragma changes darwin64 ABI");
4776 else
4778 rs6000_darwin64_abi = 1;
4779 /* Default to natural alignment, for better performance. */
4780 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4784 /* Place FP constants in the constant pool instead of the TOC
4785 if section anchors are enabled. */
4786 if (flag_section_anchors
4787 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4788 TARGET_NO_FP_IN_TOC = 1;
4790 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4791 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4793 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4794 SUBTARGET_OVERRIDE_OPTIONS;
4795 #endif
4796 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4797 SUBSUBTARGET_OVERRIDE_OPTIONS;
4798 #endif
4799 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4800 SUB3TARGET_OVERRIDE_OPTIONS;
4801 #endif
4803 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4804 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4806 /* For the E500 family of cores, reset the single/double FP flags to let us
4807 check that they remain constant across attributes or pragmas. */
4809 switch (rs6000_cpu)
4811 case PROCESSOR_PPC8540:
4812 case PROCESSOR_PPC8548:
4813 case PROCESSOR_PPCE500MC:
4814 case PROCESSOR_PPCE500MC64:
4815 case PROCESSOR_PPCE5500:
4816 case PROCESSOR_PPCE6500:
4817 rs6000_single_float = 0;
4818 rs6000_double_float = 0;
4819 break;
4821 default:
4822 break;
4825 if (main_target_opt)
4827 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4828 error ("target attribute or pragma changes single precision floating "
4829 "point");
4830 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4831 error ("target attribute or pragma changes double precision floating "
4832 "point");
4835 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4836 && rs6000_tune != PROCESSOR_POWER5
4837 && rs6000_tune != PROCESSOR_POWER6
4838 && rs6000_tune != PROCESSOR_POWER7
4839 && rs6000_tune != PROCESSOR_POWER8
4840 && rs6000_tune != PROCESSOR_POWER9
4841 && rs6000_tune != PROCESSOR_PPCA2
4842 && rs6000_tune != PROCESSOR_CELL
4843 && rs6000_tune != PROCESSOR_PPC476);
4844 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4845 || rs6000_tune == PROCESSOR_POWER5
4846 || rs6000_tune == PROCESSOR_POWER7
4847 || rs6000_tune == PROCESSOR_POWER8);
4848 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4849 || rs6000_tune == PROCESSOR_POWER5
4850 || rs6000_tune == PROCESSOR_POWER6
4851 || rs6000_tune == PROCESSOR_POWER7
4852 || rs6000_tune == PROCESSOR_POWER8
4853 || rs6000_tune == PROCESSOR_POWER9
4854 || rs6000_tune == PROCESSOR_PPCE500MC
4855 || rs6000_tune == PROCESSOR_PPCE500MC64
4856 || rs6000_tune == PROCESSOR_PPCE5500
4857 || rs6000_tune == PROCESSOR_PPCE6500);
4859 /* Allow debug switches to override the above settings. These are set to -1
4860 in rs6000.opt to indicate the user hasn't directly set the switch. */
4861 if (TARGET_ALWAYS_HINT >= 0)
4862 rs6000_always_hint = TARGET_ALWAYS_HINT;
4864 if (TARGET_SCHED_GROUPS >= 0)
4865 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4867 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4868 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4870 rs6000_sched_restricted_insns_priority
4871 = (rs6000_sched_groups ? 1 : 0);
4873 /* Handle -msched-costly-dep option. */
4874 rs6000_sched_costly_dep
4875 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4877 if (rs6000_sched_costly_dep_str)
4879 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4880 rs6000_sched_costly_dep = no_dep_costly;
4881 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4882 rs6000_sched_costly_dep = all_deps_costly;
4883 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4884 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4885 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4886 rs6000_sched_costly_dep = store_to_load_dep_costly;
4887 else
4888 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4889 atoi (rs6000_sched_costly_dep_str));
4892 /* Handle -minsert-sched-nops option. */
4893 rs6000_sched_insert_nops
4894 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4896 if (rs6000_sched_insert_nops_str)
4898 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4899 rs6000_sched_insert_nops = sched_finish_none;
4900 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4901 rs6000_sched_insert_nops = sched_finish_pad_groups;
4902 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4903 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4904 else
4905 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4906 atoi (rs6000_sched_insert_nops_str));
4909 /* Handle stack protector. */
4910 if (!global_options_set.x_rs6000_stack_protector_guard)
4911 #ifdef TARGET_THREAD_SSP_OFFSET
4912 rs6000_stack_protector_guard = SSP_TLS;
4913 #else
4914 rs6000_stack_protector_guard = SSP_GLOBAL;
4915 #endif
4917 #ifdef TARGET_THREAD_SSP_OFFSET
4918 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4919 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4920 #endif
4922 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4924 char *endp;
4925 const char *str = rs6000_stack_protector_guard_offset_str;
4927 errno = 0;
4928 long offset = strtol (str, &endp, 0);
4929 if (!*str || *endp || errno)
4930 error ("%qs is not a valid number in %qs", str,
4931 "-mstack-protector-guard-offset=");
4933 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4934 || (TARGET_64BIT && (offset & 3)))
4935 error ("%qs is not a valid offset in %qs", str,
4936 "-mstack-protector-guard-offset=");
4938 rs6000_stack_protector_guard_offset = offset;
4941 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4943 const char *str = rs6000_stack_protector_guard_reg_str;
4944 int reg = decode_reg_name (str);
4946 if (!IN_RANGE (reg, 1, 31))
4947 error ("%qs is not a valid base register in %qs", str,
4948 "-mstack-protector-guard-reg=");
4950 rs6000_stack_protector_guard_reg = reg;
4953 if (rs6000_stack_protector_guard == SSP_TLS
4954 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4955 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
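/* Example of overriding the TLS guard location from the command line
   (values illustrative):

       gcc -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
           -mstack-protector-guard-offset=0x28 foo.c

   makes the canary load come from 0x28(r13) instead of the default
   TARGET_THREAD_SSP_OFFSET slot.  */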
4957 if (global_init_p)
4959 #ifdef TARGET_REGNAMES
4960 /* If the user desires alternate register names, copy in the
4961 alternate names now. */
4962 if (TARGET_REGNAMES)
4963 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4964 #endif
4966 /* Set aix_struct_return last, after the ABI is determined.
4967 If -maix-struct-return or -msvr4-struct-return was explicitly
4968 used, don't override with the ABI default. */
4969 if (!global_options_set.x_aix_struct_return)
4970 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4972 #if 0
4973 /* IBM XL compiler defaults to unsigned bitfields. */
4974 if (TARGET_XL_COMPAT)
4975 flag_signed_bitfields = 0;
4976 #endif
4978 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4979 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4981 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4983 /* We can only guarantee the availability of DI pseudo-ops when
4984 assembling for 64-bit targets. */
4985 if (!TARGET_64BIT)
4987 targetm.asm_out.aligned_op.di = NULL;
4988 targetm.asm_out.unaligned_op.di = NULL;
4992 /* Set branch target alignment, if not optimizing for size. */
4993 if (!optimize_size)
4995 /* Cell wants branch targets aligned to 8 bytes for dual issue.  Titan
4996 wants 8-byte alignment to avoid misprediction by the branch predictor.  */
4997 if (rs6000_tune == PROCESSOR_TITAN
4998 || rs6000_tune == PROCESSOR_CELL)
5000 if (align_functions <= 0)
5001 align_functions = 8;
5002 if (align_jumps <= 0)
5003 align_jumps = 8;
5004 if (align_loops <= 0)
5005 align_loops = 8;
5007 if (rs6000_align_branch_targets)
5009 if (align_functions <= 0)
5010 align_functions = 16;
5011 if (align_jumps <= 0)
5012 align_jumps = 16;
5013 if (align_loops <= 0)
5015 can_override_loop_align = 1;
5016 align_loops = 16;
5019 if (align_jumps_max_skip <= 0)
5020 align_jumps_max_skip = 15;
5021 if (align_loops_max_skip <= 0)
5022 align_loops_max_skip = 15;
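/* For example, when rs6000_align_branch_targets is set and no -falign-*
   options were given, functions, jumps and loops all end up aligned to
   16 bytes, and the max_skip values of 15 mean the assembler may insert
   at most 15 bytes of padding to reach such a boundary.  */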
5025 /* Arrange to save and restore machine status around nested functions. */
5026 init_machine_status = rs6000_init_machine_status;
5028 /* We should always be splitting complex arguments, but we can't break
5029 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5030 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5031 targetm.calls.split_complex_arg = NULL;
5033 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5034 if (DEFAULT_ABI == ABI_AIX)
5035 targetm.calls.custom_function_descriptors = 0;
5038 /* Initialize rs6000_cost with the appropriate target costs. */
5039 if (optimize_size)
5040 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5041 else
5042 switch (rs6000_tune)
5044 case PROCESSOR_RS64A:
5045 rs6000_cost = &rs64a_cost;
5046 break;
5048 case PROCESSOR_MPCCORE:
5049 rs6000_cost = &mpccore_cost;
5050 break;
5052 case PROCESSOR_PPC403:
5053 rs6000_cost = &ppc403_cost;
5054 break;
5056 case PROCESSOR_PPC405:
5057 rs6000_cost = &ppc405_cost;
5058 break;
5060 case PROCESSOR_PPC440:
5061 rs6000_cost = &ppc440_cost;
5062 break;
5064 case PROCESSOR_PPC476:
5065 rs6000_cost = &ppc476_cost;
5066 break;
5068 case PROCESSOR_PPC601:
5069 rs6000_cost = &ppc601_cost;
5070 break;
5072 case PROCESSOR_PPC603:
5073 rs6000_cost = &ppc603_cost;
5074 break;
5076 case PROCESSOR_PPC604:
5077 rs6000_cost = &ppc604_cost;
5078 break;
5080 case PROCESSOR_PPC604e:
5081 rs6000_cost = &ppc604e_cost;
5082 break;
5084 case PROCESSOR_PPC620:
5085 rs6000_cost = &ppc620_cost;
5086 break;
5088 case PROCESSOR_PPC630:
5089 rs6000_cost = &ppc630_cost;
5090 break;
5092 case PROCESSOR_CELL:
5093 rs6000_cost = &ppccell_cost;
5094 break;
5096 case PROCESSOR_PPC750:
5097 case PROCESSOR_PPC7400:
5098 rs6000_cost = &ppc750_cost;
5099 break;
5101 case PROCESSOR_PPC7450:
5102 rs6000_cost = &ppc7450_cost;
5103 break;
5105 case PROCESSOR_PPC8540:
5106 case PROCESSOR_PPC8548:
5107 rs6000_cost = &ppc8540_cost;
5108 break;
5110 case PROCESSOR_PPCE300C2:
5111 case PROCESSOR_PPCE300C3:
5112 rs6000_cost = &ppce300c2c3_cost;
5113 break;
5115 case PROCESSOR_PPCE500MC:
5116 rs6000_cost = &ppce500mc_cost;
5117 break;
5119 case PROCESSOR_PPCE500MC64:
5120 rs6000_cost = &ppce500mc64_cost;
5121 break;
5123 case PROCESSOR_PPCE5500:
5124 rs6000_cost = &ppce5500_cost;
5125 break;
5127 case PROCESSOR_PPCE6500:
5128 rs6000_cost = &ppce6500_cost;
5129 break;
5131 case PROCESSOR_TITAN:
5132 rs6000_cost = &titan_cost;
5133 break;
5135 case PROCESSOR_POWER4:
5136 case PROCESSOR_POWER5:
5137 rs6000_cost = &power4_cost;
5138 break;
5140 case PROCESSOR_POWER6:
5141 rs6000_cost = &power6_cost;
5142 break;
5144 case PROCESSOR_POWER7:
5145 rs6000_cost = &power7_cost;
5146 break;
5148 case PROCESSOR_POWER8:
5149 rs6000_cost = &power8_cost;
5150 break;
5152 case PROCESSOR_POWER9:
5153 rs6000_cost = &power9_cost;
5154 break;
5156 case PROCESSOR_PPCA2:
5157 rs6000_cost = &ppca2_cost;
5158 break;
5160 default:
5161 gcc_unreachable ();
5164 if (global_init_p)
5166 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5167 rs6000_cost->simultaneous_prefetches,
5168 global_options.x_param_values,
5169 global_options_set.x_param_values);
5170 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5171 global_options.x_param_values,
5172 global_options_set.x_param_values);
5173 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5174 rs6000_cost->cache_line_size,
5175 global_options.x_param_values,
5176 global_options_set.x_param_values);
5177 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5178 global_options.x_param_values,
5179 global_options_set.x_param_values);
5181 /* Increase loop peeling limits based on performance analysis. */
5182 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5183 global_options.x_param_values,
5184 global_options_set.x_param_values);
5185 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5186 global_options.x_param_values,
5187 global_options_set.x_param_values);
5189 /* Use the 'model' -fsched-pressure algorithm by default. */
5190 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5191 SCHED_PRESSURE_MODEL,
5192 global_options.x_param_values,
5193 global_options_set.x_param_values);
5195 /* If using typedef char *va_list, signal that
5196 __builtin_va_start (&ap, 0) can be optimized to
5197 ap = __builtin_next_arg (0). */
5198 if (DEFAULT_ABI != ABI_V4)
5199 targetm.expand_builtin_va_start = NULL;
5202 /* Set up single/double float flags.
5203 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5204 then set both flags. */
5205 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5206 rs6000_single_float = rs6000_double_float = 1;
5208 /* If not explicitly specified via option, decide whether to generate indexed
5209 load/store instructions. A value of -1 indicates that the
5210 initial value of this variable has not been overwritten. During
5211 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5212 if (TARGET_AVOID_XFORM == -1)
5213 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5214 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5215 need indexed accesses and the type used is the scalar type of the element
5216 being loaded or stored. */
5217 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
5218 && !TARGET_ALTIVEC);
5220 /* Set the -mrecip options. */
5221 if (rs6000_recip_name)
5223 char *p = ASTRDUP (rs6000_recip_name);
5224 char *q;
5225 unsigned int mask, i;
5226 bool invert;
5228 while ((q = strtok (p, ",")) != NULL)
5230 p = NULL;
5231 if (*q == '!')
5233 invert = true;
5234 q++;
5236 else
5237 invert = false;
5239 if (!strcmp (q, "default"))
5240 mask = ((TARGET_RECIP_PRECISION)
5241 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5242 else
5244 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5245 if (!strcmp (q, recip_options[i].string))
5247 mask = recip_options[i].mask;
5248 break;
5251 if (i == ARRAY_SIZE (recip_options))
5253 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5254 invert = false;
5255 mask = 0;
5256 ret = false;
5260 if (invert)
5261 rs6000_recip_control &= ~mask;
5262 else
5263 rs6000_recip_control |= mask;
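/* For illustration: "-mrecip=all,!rsqrtd" walks the comma-separated list
   left to right, first setting every bit in rs6000_recip_control and
   then clearing the bits named by "rsqrtd" because of the "!" prefix;
   the available keywords come from the recip_options table defined
   earlier in this file.  */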
5267 /* Set the builtin mask of the various options used that could affect which
5268 builtins were used. In the past we used target_flags, but we've run out
5269 of bits, and some options like PAIRED are no longer in target_flags. */
5270 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5271 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5272 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5273 rs6000_builtin_mask);
5275 /* Initialize all of the registers. */
5276 rs6000_init_hard_regno_mode_ok (global_init_p);
5278 /* Save the initial options in case the user uses function-specific options.  */
5279 if (global_init_p)
5280 target_option_default_node = target_option_current_node
5281 = build_target_option_node (&global_options);
5283 /* If not explicitly specified via option, decide whether to generate the
5284 extra blr's required to preserve the link stack on some cpus (e.g., the 476).
5285 if (TARGET_LINK_STACK == -1)
5286 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
5288 return ret;
5291 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5292 define the target cpu type. */
5294 static void
5295 rs6000_option_override (void)
5297 (void) rs6000_option_override_internal (true);
5301 /* Implement targetm.vectorize.builtin_mask_for_load. */
5302 static tree
5303 rs6000_builtin_mask_for_load (void)
5305 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5306 if ((TARGET_ALTIVEC && !TARGET_VSX)
5307 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5308 return altivec_builtin_mask_for_load;
5309 else
5310 return 0;
5313 /* Implement LOOP_ALIGN. */
5314 int
5315 rs6000_loop_align (rtx label)
5317 basic_block bb;
5318 int ninsns;
5320 /* Don't override loop alignment if -falign-loops was specified. */
5321 if (!can_override_loop_align)
5322 return align_loops_log;
5324 bb = BLOCK_FOR_INSN (label);
5325 ninsns = num_loop_insns(bb->loop_father);
5327 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5328 if (ninsns > 4 && ninsns <= 8
5329 && (rs6000_tune == PROCESSOR_POWER4
5330 || rs6000_tune == PROCESSOR_POWER5
5331 || rs6000_tune == PROCESSOR_POWER6
5332 || rs6000_tune == PROCESSOR_POWER7
5333 || rs6000_tune == PROCESSOR_POWER8
5334 || rs6000_tune == PROCESSOR_POWER9))
5335 return 5;
5336 else
5337 return align_loops_log;
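/* The 5 above is a log2 value, i.e. 1 << 5 == 32-byte alignment: a loop
   of 5 to 8 four-byte instructions (20 to 32 bytes) can then sit
   entirely within one 32-byte icache sector on these processors.  */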
5340 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5341 static int
5342 rs6000_loop_align_max_skip (rtx_insn *label)
5344 return (1 << rs6000_loop_align (label)) - 1;
5347 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5348 after applying N iterations.  This routine does not determine
5349 how many iterations are required to reach the desired alignment.  */
5351 static bool
5352 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5354 if (is_packed)
5355 return false;
5357 if (TARGET_32BIT)
5359 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5360 return true;
5362 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5363 return true;
5365 return false;
5367 else
5369 if (TARGET_MACHO)
5370 return false;
5372 /* Assume that all other types are naturally aligned.  CHECKME! */
5373 return true;
5377 /* Return true if the vector misalignment factor is supported by the
5378 target. */
5379 static bool
5380 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5381 const_tree type,
5382 int misalignment,
5383 bool is_packed)
5385 if (TARGET_VSX)
5387 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5388 return true;
5390 /* Return false if the movmisalign pattern is not supported for this mode.  */
5391 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5392 return false;
5394 if (misalignment == -1)
5396 /* Misalignment factor is unknown at compile time but we know
5397 it's word aligned. */
5398 if (rs6000_vector_alignment_reachable (type, is_packed))
5400 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5402 if (element_size == 64 || element_size == 32)
5403 return true;
5406 return false;
5409 /* VSX supports word-aligned vectors.  */
5410 if (misalignment % 4 == 0)
5411 return true;
5413 return false;
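/* Worked example: on a VSX target without efficient unaligned accesses
   (and assuming the movmisalign pattern exists for the mode), a V4SF
   access with a known misalignment of 4 or 8 bytes is reported as
   supported since it is word aligned, whereas a misalignment of 2 bytes
   is not.  */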
5416 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5417 static int
5418 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5419 tree vectype, int misalign)
5421 unsigned elements;
5422 tree elem_type;
5424 switch (type_of_cost)
5426 case scalar_stmt:
5427 case scalar_load:
5428 case scalar_store:
5429 case vector_stmt:
5430 case vector_load:
5431 case vector_store:
5432 case vec_to_scalar:
5433 case scalar_to_vec:
5434 case cond_branch_not_taken:
5435 return 1;
5437 case vec_perm:
5438 if (TARGET_VSX)
5439 return 3;
5440 else
5441 return 1;
5443 case vec_promote_demote:
5444 if (TARGET_VSX)
5445 return 4;
5446 else
5447 return 1;
5449 case cond_branch_taken:
5450 return 3;
5452 case unaligned_load:
5453 case vector_gather_load:
5454 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5455 return 1;
5457 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5459 elements = TYPE_VECTOR_SUBPARTS (vectype);
5460 if (elements == 2)
5461 /* Double word aligned. */
5462 return 2;
5464 if (elements == 4)
5466 switch (misalign)
5468 case 8:
5469 /* Double word aligned. */
5470 return 2;
5472 case -1:
5473 /* Unknown misalignment. */
5474 case 4:
5475 case 12:
5476 /* Word aligned. */
5477 return 22;
5479 default:
5480 gcc_unreachable ();
5485 if (TARGET_ALTIVEC)
5486 /* Misaligned loads are not supported. */
5487 gcc_unreachable ();
5489 return 2;
5491 case unaligned_store:
5492 case vector_scatter_store:
5493 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5494 return 1;
5496 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5498 elements = TYPE_VECTOR_SUBPARTS (vectype);
5499 if (elements == 2)
5500 /* Double word aligned. */
5501 return 2;
5503 if (elements == 4)
5505 switch (misalign)
5507 case 8:
5508 /* Double word aligned. */
5509 return 2;
5511 case -1:
5512 /* Unknown misalignment. */
5513 case 4:
5514 case 12:
5515 /* Word aligned. */
5516 return 23;
5518 default:
5519 gcc_unreachable ();
5524 if (TARGET_ALTIVEC)
5525 /* Misaligned stores are not supported. */
5526 gcc_unreachable ();
5528 return 2;
5530 case vec_construct:
5531 /* This is a rough approximation assuming non-constant elements
5532 constructed into a vector via element insertion. FIXME:
5533 vec_construct is not granular enough for uniformly good
5534 decisions. If the initialization is a splat, this is
5535 cheaper than we estimate. Improve this someday. */
5536 elem_type = TREE_TYPE (vectype);
5537 /* 32-bit vectors loaded into registers are stored as double
5538 precision, so we need 2 permutes, 2 converts, and 1 merge
5539 to construct a vector of short floats from them. */
5540 if (SCALAR_FLOAT_TYPE_P (elem_type)
5541 && TYPE_PRECISION (elem_type) == 32)
5542 return 5;
5543 /* On POWER9, integer vector types are built up in GPRs and then
5544 use a direct move (2 cycles). For POWER8 this is even worse,
5545 as we need two direct moves and a merge, and the direct moves
5546 are five cycles. */
5547 else if (INTEGRAL_TYPE_P (elem_type))
5549 if (TARGET_P9_VECTOR)
5550 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5551 else
5552 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5554 else
5555 /* V2DFmode doesn't need a direct move. */
5556 return 2;
5558 default:
5559 gcc_unreachable ();
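/* A worked example of the unaligned costs above: with VSX and
   -mallow-movmisalign but without efficient unaligned accesses, a V4SF
   (elements == 4) load with misalign 8 costs 2, while misalign 4, 12 or
   unknown (-1) costs 22; the large value discourages vectorization when
   only word alignment can be guaranteed.  */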
5563 /* Implement targetm.vectorize.preferred_simd_mode. */
5565 static machine_mode
5566 rs6000_preferred_simd_mode (scalar_mode mode)
5568 if (TARGET_VSX)
5569 switch (mode)
5571 case E_DFmode:
5572 return V2DFmode;
5573 default:;
5575 if (TARGET_ALTIVEC || TARGET_VSX)
5576 switch (mode)
5578 case E_SFmode:
5579 return V4SFmode;
5580 case E_TImode:
5581 return V1TImode;
5582 case E_DImode:
5583 return V2DImode;
5584 case E_SImode:
5585 return V4SImode;
5586 case E_HImode:
5587 return V8HImode;
5588 case E_QImode:
5589 return V16QImode;
5590 default:;
5592 if (TARGET_PAIRED_FLOAT
5593 && mode == SFmode)
5594 return V2SFmode;
5595 return word_mode;
5598 typedef struct _rs6000_cost_data
5600 struct loop *loop_info;
5601 unsigned cost[3];
5602 } rs6000_cost_data;
5604 /* Test for likely overcommitment of vector hardware resources. If a
5605 loop iteration is relatively large, and too large a percentage of
5606 instructions in the loop are vectorized, the cost model may not
5607 adequately reflect delays from unavailable vector resources.
5608 Penalize the loop body cost for this case. */
5610 static void
5611 rs6000_density_test (rs6000_cost_data *data)
5613 const int DENSITY_PCT_THRESHOLD = 85;
5614 const int DENSITY_SIZE_THRESHOLD = 70;
5615 const int DENSITY_PENALTY = 10;
5616 struct loop *loop = data->loop_info;
5617 basic_block *bbs = get_loop_body (loop);
5618 int nbbs = loop->num_nodes;
5619 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5620 int i, density_pct;
5622 for (i = 0; i < nbbs; i++)
5624 basic_block bb = bbs[i];
5625 gimple_stmt_iterator gsi;
5627 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5629 gimple *stmt = gsi_stmt (gsi);
5630 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5632 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5633 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5634 not_vec_cost++;
5638 free (bbs);
5639 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5641 if (density_pct > DENSITY_PCT_THRESHOLD
5642 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5644 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5645 if (dump_enabled_p ())
5646 dump_printf_loc (MSG_NOTE, vect_location,
5647 "density %d%%, cost %d exceeds threshold, penalizing "
5648 "loop body cost by %d%%", density_pct,
5649 vec_cost + not_vec_cost, DENSITY_PENALTY);
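/* Worked example: with vec_cost == 90 and not_vec_cost == 10, the
   density is 90% (above the 85% threshold) and the total size 100
   exceeds DENSITY_SIZE_THRESHOLD, so the body cost is scaled by 110%,
   from 90 to 99.  */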
5653 /* Implement targetm.vectorize.init_cost. */
5655 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5656 instruction is needed by the vectorization. */
5657 static bool rs6000_vect_nonmem;
5659 static void *
5660 rs6000_init_cost (struct loop *loop_info)
5662 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5663 data->loop_info = loop_info;
5664 data->cost[vect_prologue] = 0;
5665 data->cost[vect_body] = 0;
5666 data->cost[vect_epilogue] = 0;
5667 rs6000_vect_nonmem = false;
5668 return data;
5671 /* Implement targetm.vectorize.add_stmt_cost. */
5673 static unsigned
5674 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5675 struct _stmt_vec_info *stmt_info, int misalign,
5676 enum vect_cost_model_location where)
5678 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5679 unsigned retval = 0;
5681 if (flag_vect_cost_model)
5683 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5684 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5685 misalign);
5686 /* Statements in an inner loop relative to the loop being
5687 vectorized are weighted more heavily. The value here is
5688 arbitrary and could potentially be improved with analysis. */
5689 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5690 count *= 50; /* FIXME. */
5692 retval = (unsigned) (count * stmt_cost);
5693 cost_data->cost[where] += retval;
5695 /* Check whether we're doing something other than just a copy loop.
5696 Not all such loops may be profitably vectorized; see
5697 rs6000_finish_cost. */
5698 if ((kind == vec_to_scalar || kind == vec_perm
5699 || kind == vec_promote_demote || kind == vec_construct
5700 || kind == scalar_to_vec)
5701 || (where == vect_body && kind == vector_stmt))
5702 rs6000_vect_nonmem = true;
5705 return retval;
5708 /* Implement targetm.vectorize.finish_cost. */
5710 static void
5711 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5712 unsigned *body_cost, unsigned *epilogue_cost)
5714 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5716 if (cost_data->loop_info)
5717 rs6000_density_test (cost_data);
5719 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5720 that require versioning for any reason. The vectorization is at
5721 best a wash inside the loop, and the versioning checks make
5722 profitability highly unlikely and potentially quite harmful. */
5723 if (cost_data->loop_info)
5725 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5726 if (!rs6000_vect_nonmem
5727 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5728 && LOOP_REQUIRES_VERSIONING (vec_info))
5729 cost_data->cost[vect_body] += 10000;
5732 *prologue_cost = cost_data->cost[vect_prologue];
5733 *body_cost = cost_data->cost[vect_body];
5734 *epilogue_cost = cost_data->cost[vect_epilogue];
5737 /* Implement targetm.vectorize.destroy_cost_data. */
5739 static void
5740 rs6000_destroy_cost_data (void *data)
5742 free (data);
5745 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5746 library with vectorized intrinsics. */
5748 static tree
5749 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5750 tree type_in)
5752 char name[32];
5753 const char *suffix = NULL;
5754 tree fntype, new_fndecl, bdecl = NULL_TREE;
5755 int n_args = 1;
5756 const char *bname;
5757 machine_mode el_mode, in_mode;
5758 int n, in_n;
5760 /* Libmass is suitable for unsafe math only as it does not correctly support
5761 parts of IEEE with the required precision such as denormals. Only support
5762 it if we have VSX to use the simd d2 or f4 functions.
5763 XXX: Add variable length support. */
5764 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5765 return NULL_TREE;
5767 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5768 n = TYPE_VECTOR_SUBPARTS (type_out);
5769 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5770 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5771 if (el_mode != in_mode
5772 || n != in_n)
5773 return NULL_TREE;
5775 switch (fn)
5777 CASE_CFN_ATAN2:
5778 CASE_CFN_HYPOT:
5779 CASE_CFN_POW:
5780 n_args = 2;
5781 gcc_fallthrough ();
5783 CASE_CFN_ACOS:
5784 CASE_CFN_ACOSH:
5785 CASE_CFN_ASIN:
5786 CASE_CFN_ASINH:
5787 CASE_CFN_ATAN:
5788 CASE_CFN_ATANH:
5789 CASE_CFN_CBRT:
5790 CASE_CFN_COS:
5791 CASE_CFN_COSH:
5792 CASE_CFN_ERF:
5793 CASE_CFN_ERFC:
5794 CASE_CFN_EXP2:
5795 CASE_CFN_EXP:
5796 CASE_CFN_EXPM1:
5797 CASE_CFN_LGAMMA:
5798 CASE_CFN_LOG10:
5799 CASE_CFN_LOG1P:
5800 CASE_CFN_LOG2:
5801 CASE_CFN_LOG:
5802 CASE_CFN_SIN:
5803 CASE_CFN_SINH:
5804 CASE_CFN_SQRT:
5805 CASE_CFN_TAN:
5806 CASE_CFN_TANH:
5807 if (el_mode == DFmode && n == 2)
5809 bdecl = mathfn_built_in (double_type_node, fn);
5810 suffix = "d2"; /* pow -> powd2 */
5812 else if (el_mode == SFmode && n == 4)
5814 bdecl = mathfn_built_in (float_type_node, fn);
5815 suffix = "4"; /* powf -> powf4 */
5817 else
5818 return NULL_TREE;
5819 if (!bdecl)
5820 return NULL_TREE;
5821 break;
5823 default:
5824 return NULL_TREE;
5827 gcc_assert (suffix != NULL);
5828 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5829 if (!bname)
5830 return NULL_TREE;
5832 strcpy (name, bname + sizeof ("__builtin_") - 1);
5833 strcat (name, suffix);
5835 if (n_args == 1)
5836 fntype = build_function_type_list (type_out, type_in, NULL);
5837 else if (n_args == 2)
5838 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5839 else
5840 gcc_unreachable ();
5842 /* Build a function declaration for the vectorized function. */
5843 new_fndecl = build_decl (BUILTINS_LOCATION,
5844 FUNCTION_DECL, get_identifier (name), fntype);
5845 TREE_PUBLIC (new_fndecl) = 1;
5846 DECL_EXTERNAL (new_fndecl) = 1;
5847 DECL_IS_NOVOPS (new_fndecl) = 1;
5848 TREE_READONLY (new_fndecl) = 1;
5850 return new_fndecl;
5853 /* Returns a function decl for a vectorized version of the builtin function
5854 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5855 if it is not available. */
5857 static tree
5858 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5859 tree type_in)
5861 machine_mode in_mode, out_mode;
5862 int in_n, out_n;
5864 if (TARGET_DEBUG_BUILTIN)
5865 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5866 combined_fn_name (combined_fn (fn)),
5867 GET_MODE_NAME (TYPE_MODE (type_out)),
5868 GET_MODE_NAME (TYPE_MODE (type_in)));
5870 if (TREE_CODE (type_out) != VECTOR_TYPE
5871 || TREE_CODE (type_in) != VECTOR_TYPE)
5872 return NULL_TREE;
5874 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5875 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5876 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5877 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5879 switch (fn)
5881 CASE_CFN_COPYSIGN:
5882 if (VECTOR_UNIT_VSX_P (V2DFmode)
5883 && out_mode == DFmode && out_n == 2
5884 && in_mode == DFmode && in_n == 2)
5885 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5886 if (VECTOR_UNIT_VSX_P (V4SFmode)
5887 && out_mode == SFmode && out_n == 4
5888 && in_mode == SFmode && in_n == 4)
5889 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5890 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5891 && out_mode == SFmode && out_n == 4
5892 && in_mode == SFmode && in_n == 4)
5893 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5894 break;
5895 CASE_CFN_CEIL:
5896 if (VECTOR_UNIT_VSX_P (V2DFmode)
5897 && out_mode == DFmode && out_n == 2
5898 && in_mode == DFmode && in_n == 2)
5899 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5900 if (VECTOR_UNIT_VSX_P (V4SFmode)
5901 && out_mode == SFmode && out_n == 4
5902 && in_mode == SFmode && in_n == 4)
5903 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5904 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5905 && out_mode == SFmode && out_n == 4
5906 && in_mode == SFmode && in_n == 4)
5907 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5908 break;
5909 CASE_CFN_FLOOR:
5910 if (VECTOR_UNIT_VSX_P (V2DFmode)
5911 && out_mode == DFmode && out_n == 2
5912 && in_mode == DFmode && in_n == 2)
5913 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5914 if (VECTOR_UNIT_VSX_P (V4SFmode)
5915 && out_mode == SFmode && out_n == 4
5916 && in_mode == SFmode && in_n == 4)
5917 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5918 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5919 && out_mode == SFmode && out_n == 4
5920 && in_mode == SFmode && in_n == 4)
5921 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5922 break;
5923 CASE_CFN_FMA:
5924 if (VECTOR_UNIT_VSX_P (V2DFmode)
5925 && out_mode == DFmode && out_n == 2
5926 && in_mode == DFmode && in_n == 2)
5927 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5928 if (VECTOR_UNIT_VSX_P (V4SFmode)
5929 && out_mode == SFmode && out_n == 4
5930 && in_mode == SFmode && in_n == 4)
5931 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5932 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5933 && out_mode == SFmode && out_n == 4
5934 && in_mode == SFmode && in_n == 4)
5935 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5936 break;
5937 CASE_CFN_TRUNC:
5938 if (VECTOR_UNIT_VSX_P (V2DFmode)
5939 && out_mode == DFmode && out_n == 2
5940 && in_mode == DFmode && in_n == 2)
5941 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5942 if (VECTOR_UNIT_VSX_P (V4SFmode)
5943 && out_mode == SFmode && out_n == 4
5944 && in_mode == SFmode && in_n == 4)
5945 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5946 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5947 && out_mode == SFmode && out_n == 4
5948 && in_mode == SFmode && in_n == 4)
5949 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5950 break;
5951 CASE_CFN_NEARBYINT:
5952 if (VECTOR_UNIT_VSX_P (V2DFmode)
5953 && flag_unsafe_math_optimizations
5954 && out_mode == DFmode && out_n == 2
5955 && in_mode == DFmode && in_n == 2)
5956 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5957 if (VECTOR_UNIT_VSX_P (V4SFmode)
5958 && flag_unsafe_math_optimizations
5959 && out_mode == SFmode && out_n == 4
5960 && in_mode == SFmode && in_n == 4)
5961 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5962 break;
5963 CASE_CFN_RINT:
5964 if (VECTOR_UNIT_VSX_P (V2DFmode)
5965 && !flag_trapping_math
5966 && out_mode == DFmode && out_n == 2
5967 && in_mode == DFmode && in_n == 2)
5968 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5969 if (VECTOR_UNIT_VSX_P (V4SFmode)
5970 && !flag_trapping_math
5971 && out_mode == SFmode && out_n == 4
5972 && in_mode == SFmode && in_n == 4)
5973 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5974 break;
5975 default:
5976 break;
5979 /* Generate calls to libmass if appropriate. */
5980 if (rs6000_veclib_handler)
5981 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5983 return NULL_TREE;
5986 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5988 static tree
5989 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5990 tree type_in)
5992 machine_mode in_mode, out_mode;
5993 int in_n, out_n;
5995 if (TARGET_DEBUG_BUILTIN)
5996 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5997 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5998 GET_MODE_NAME (TYPE_MODE (type_out)),
5999 GET_MODE_NAME (TYPE_MODE (type_in)));
6001 if (TREE_CODE (type_out) != VECTOR_TYPE
6002 || TREE_CODE (type_in) != VECTOR_TYPE)
6003 return NULL_TREE;
6005 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6006 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6007 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6008 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6010 enum rs6000_builtins fn
6011 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6012 switch (fn)
6014 case RS6000_BUILTIN_RSQRTF:
6015 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6016 && out_mode == SFmode && out_n == 4
6017 && in_mode == SFmode && in_n == 4)
6018 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6019 break;
6020 case RS6000_BUILTIN_RSQRT:
6021 if (VECTOR_UNIT_VSX_P (V2DFmode)
6022 && out_mode == DFmode && out_n == 2
6023 && in_mode == DFmode && in_n == 2)
6024 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6025 break;
6026 case RS6000_BUILTIN_RECIPF:
6027 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6028 && out_mode == SFmode && out_n == 4
6029 && in_mode == SFmode && in_n == 4)
6030 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6031 break;
6032 case RS6000_BUILTIN_RECIP:
6033 if (VECTOR_UNIT_VSX_P (V2DFmode)
6034 && out_mode == DFmode && out_n == 2
6035 && in_mode == DFmode && in_n == 2)
6036 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6037 break;
6038 default:
6039 break;
6041 return NULL_TREE;
6044 /* Default CPU string for rs6000*_file_start functions. */
6045 static const char *rs6000_default_cpu;
6047 /* Do anything needed at the start of the asm file. */
6049 static void
6050 rs6000_file_start (void)
6052 char buffer[80];
6053 const char *start = buffer;
6054 FILE *file = asm_out_file;
6056 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6058 default_file_start ();
6060 if (flag_verbose_asm)
6062 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6064 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6066 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6067 start = "";
6070 if (global_options_set.x_rs6000_cpu_index)
6072 fprintf (file, "%s -mcpu=%s", start,
6073 processor_target_table[rs6000_cpu_index].name);
6074 start = "";
6077 if (global_options_set.x_rs6000_tune_index)
6079 fprintf (file, "%s -mtune=%s", start,
6080 processor_target_table[rs6000_tune_index].name);
6081 start = "";
6084 if (PPC405_ERRATUM77)
6086 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6087 start = "";
6090 #ifdef USING_ELFOS_H
6091 switch (rs6000_sdata)
6093 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6094 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6095 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6096 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6099 if (rs6000_sdata && g_switch_value)
6101 fprintf (file, "%s -G %d", start,
6102 g_switch_value);
6103 start = "";
6105 #endif
6107 if (*start == '\0')
6108 putc ('\n', file);
6111 #ifdef USING_ELFOS_H
6112 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6113 && !global_options_set.x_rs6000_cpu_index)
6115 fputs ("\t.machine ", asm_out_file);
6116 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6117 fputs ("power9\n", asm_out_file);
6118 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6119 fputs ("power8\n", asm_out_file);
6120 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6121 fputs ("power7\n", asm_out_file);
6122 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6123 fputs ("power6\n", asm_out_file);
6124 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6125 fputs ("power5\n", asm_out_file);
6126 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6127 fputs ("power4\n", asm_out_file);
6128 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6129 fputs ("ppc64\n", asm_out_file);
6130 else
6131 fputs ("ppc\n", asm_out_file);
6133 #endif
6135 if (DEFAULT_ABI == ABI_ELFv2)
6136 fprintf (file, "\t.abiversion 2\n");
6140 /* Return nonzero if this function is known to have a null epilogue. */
6142 int
6143 direct_return (void)
6145 if (reload_completed)
6147 rs6000_stack_t *info = rs6000_stack_info ();
6149 if (info->first_gp_reg_save == 32
6150 && info->first_fp_reg_save == 64
6151 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6152 && ! info->lr_save_p
6153 && ! info->cr_save_p
6154 && info->vrsave_size == 0
6155 && ! info->push_p)
6156 return 1;
6159 return 0;
6162 /* Return the number of instructions it takes to form a constant in an
6163 integer register. */
6165 int
6166 num_insns_constant_wide (HOST_WIDE_INT value)
6168 /* signed constant loadable with addi */
6169 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6170 return 1;
6172 /* constant loadable with addis */
6173 else if ((value & 0xffff) == 0
6174 && (value >> 31 == -1 || value >> 31 == 0))
6175 return 1;
6177 else if (TARGET_POWERPC64)
6179 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6180 HOST_WIDE_INT high = value >> 31;
6182 if (high == 0 || high == -1)
6183 return 2;
6185 high >>= 1;
6187 if (low == 0)
6188 return num_insns_constant_wide (high) + 1;
6189 else if (high == 0)
6190 return num_insns_constant_wide (low) + 1;
6191 else
6192 return (num_insns_constant_wide (high)
6193 + num_insns_constant_wide (low) + 1);
6196 else
6197 return 2;
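/* Worked examples for the TARGET_POWERPC64 path: 0x12345678 has a zero
   high part, so it needs 2 insns (lis + ori).  A full 64-bit constant
   such as 0x1234567890abcdef needs 2 insns for each 32-bit half plus 1
   to combine them, i.e. 5 in total (typically lis, ori, sldi, oris,
   ori).  */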
6200 int
6201 num_insns_constant (rtx op, machine_mode mode)
6203 HOST_WIDE_INT low, high;
6205 switch (GET_CODE (op))
6207 case CONST_INT:
6208 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6209 && rs6000_is_valid_and_mask (op, mode))
6210 return 2;
6211 else
6212 return num_insns_constant_wide (INTVAL (op));
6214 case CONST_WIDE_INT:
6216 int i;
6217 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6218 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6219 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6220 return ins;
6223 case CONST_DOUBLE:
6224 if (mode == SFmode || mode == SDmode)
6226 long l;
6228 if (DECIMAL_FLOAT_MODE_P (mode))
6229 REAL_VALUE_TO_TARGET_DECIMAL32
6230 (*CONST_DOUBLE_REAL_VALUE (op), l);
6231 else
6232 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6233 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6236 long l[2];
6237 if (DECIMAL_FLOAT_MODE_P (mode))
6238 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6239 else
6240 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6241 high = l[WORDS_BIG_ENDIAN == 0];
6242 low = l[WORDS_BIG_ENDIAN != 0];
6244 if (TARGET_32BIT)
6245 return (num_insns_constant_wide (low)
6246 + num_insns_constant_wide (high));
6247 else
6249 if ((high == 0 && low >= 0)
6250 || (high == -1 && low < 0))
6251 return num_insns_constant_wide (low);
6253 else if (rs6000_is_valid_and_mask (op, mode))
6254 return 2;
6256 else if (low == 0)
6257 return num_insns_constant_wide (high) + 1;
6259 else
6260 return (num_insns_constant_wide (high)
6261 + num_insns_constant_wide (low) + 1);
6264 default:
6265 gcc_unreachable ();
6269 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6270 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6271 corresponding element of the vector, but for V4SFmode and V2SFmode,
6272 the corresponding "float" is interpreted as an SImode integer. */
6274 HOST_WIDE_INT
6275 const_vector_elt_as_int (rtx op, unsigned int elt)
6277 rtx tmp;
6279 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6280 gcc_assert (GET_MODE (op) != V2DImode
6281 && GET_MODE (op) != V2DFmode);
6283 tmp = CONST_VECTOR_ELT (op, elt);
6284 if (GET_MODE (op) == V4SFmode
6285 || GET_MODE (op) == V2SFmode)
6286 tmp = gen_lowpart (SImode, tmp);
6287 return INTVAL (tmp);
6290 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6291 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6292 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6293 all items are set to the same value and contain COPIES replicas of the
6294 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6295 operand and the others are set to the value of the operand's msb. */
6297 static bool
6298 vspltis_constant (rtx op, unsigned step, unsigned copies)
6300 machine_mode mode = GET_MODE (op);
6301 machine_mode inner = GET_MODE_INNER (mode);
6303 unsigned i;
6304 unsigned nunits;
6305 unsigned bitsize;
6306 unsigned mask;
6308 HOST_WIDE_INT val;
6309 HOST_WIDE_INT splat_val;
6310 HOST_WIDE_INT msb_val;
6312 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6313 return false;
6315 nunits = GET_MODE_NUNITS (mode);
6316 bitsize = GET_MODE_BITSIZE (inner);
6317 mask = GET_MODE_MASK (inner);
6319 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6320 splat_val = val;
6321 msb_val = val >= 0 ? 0 : -1;
6323 /* Construct the value to be splatted, if possible. If not, return 0. */
6324 for (i = 2; i <= copies; i *= 2)
6326 HOST_WIDE_INT small_val;
6327 bitsize /= 2;
6328 small_val = splat_val >> bitsize;
6329 mask >>= bitsize;
6330 if (splat_val != ((HOST_WIDE_INT)
6331 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6332 | (small_val & mask)))
6333 return false;
6334 splat_val = small_val;
6337 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6338 if (EASY_VECTOR_15 (splat_val))
6341 /* Also check if we can splat, and then add the result to itself. Do so if
6342 the value is positive, or if the splat instruction is using OP's mode;
6343 for splat_val < 0, the splat and the add should use the same mode. */
6344 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6345 && (splat_val >= 0 || (step == 1 && copies == 1)))
6348 /* Also check if we are loading up the most significant bit which can be done by
6349 loading up -1 and shifting the value left by -1. */
6350 else if (EASY_VECTOR_MSB (splat_val, inner))
6353 else
6354 return false;
6356 /* Check if VAL is present in every STEP-th element, and the
6357 other elements are filled with its most significant bit. */
6358 for (i = 1; i < nunits; ++i)
6360 HOST_WIDE_INT desired_val;
6361 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6362 if ((i & (step - 1)) == 0)
6363 desired_val = val;
6364 else
6365 desired_val = msb_val;
6367 if (desired_val != const_vector_elt_as_int (op, elt))
6368 return false;
6371 return true;
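/* Worked examples: the V4SImode constant { 5, 5, 5, 5 } is accepted on
   the first try (step == 1, copies == 1, i.e. vspltisw 5), while a
   V4SImode vector whose every word is 0x00050005 is accepted with
   copies == 2, since halving the bitsize reproduces the same splat
   value (vspltish 5).  A splat of 24 fails EASY_VECTOR_15 but passes
   EASY_VECTOR_15_ADD_SELF: it can be built as vspltisw 12 followed by
   an add of the result to itself.  */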
6374 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6375 instruction, filling in the bottom elements with 0 or -1.
6377 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6378 for the number of zeroes to shift in, or negative for the number of 0xff
6379 bytes to shift in.
6381 OP is a CONST_VECTOR. */
6383 int
6384 vspltis_shifted (rtx op)
6386 machine_mode mode = GET_MODE (op);
6387 machine_mode inner = GET_MODE_INNER (mode);
6389 unsigned i, j;
6390 unsigned nunits;
6391 unsigned mask;
6393 HOST_WIDE_INT val;
6395 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6396 return false;
6398 /* We need to create pseudo registers to do the shift, so don't recognize
6399 shift vector constants after reload. */
6400 if (!can_create_pseudo_p ())
6401 return false;
6403 nunits = GET_MODE_NUNITS (mode);
6404 mask = GET_MODE_MASK (inner);
6406 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6408 /* Check if the value can really be the operand of a vspltis[bhw]. */
6409 if (EASY_VECTOR_15 (val))
6412 /* Also check if we are loading up the most significant bit which can be done
6413 by loading up -1 and shifting the value left by -1. */
6414 else if (EASY_VECTOR_MSB (val, inner))
6417 else
6418 return 0;
6420 /* Check if VAL is present in every STEP-th element until we find elements
6421 that are 0 or all 1 bits. */
6422 for (i = 1; i < nunits; ++i)
6424 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6425 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6427 /* If the value isn't the splat value, check for the remaining elements
6428 being 0/-1. */
6429 if (val != elt_val)
6431 if (elt_val == 0)
6433 for (j = i+1; j < nunits; ++j)
6435 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6436 if (const_vector_elt_as_int (op, elt2) != 0)
6437 return 0;
6440 return (nunits - i) * GET_MODE_SIZE (inner);
6443 else if ((elt_val & mask) == mask)
6445 for (j = i+1; j < nunits; ++j)
6447 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6448 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6449 return 0;
6452 return -((nunits - i) * GET_MODE_SIZE (inner));
6455 else
6456 return 0;
6460 /* If all elements are equal, we don't need to do VSLDOI.  */
6461 return 0;
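/* Worked example (big endian): for the V4SImode constant { 5, 5, 0, 0 }
   the splat value 5 is found in the leading elements and the rest are
   zero, so the function returns (4 - 2) * 4 == 8: vspltisw 5 followed
   by a VSLDOI shifting in 8 zero bytes.  A trailing run of all-ones
   elements would instead yield -8.  */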
6465 /* Return true if OP is of the given MODE and can be synthesized
6466 with a vspltisb, vspltish or vspltisw. */
6468 bool
6469 easy_altivec_constant (rtx op, machine_mode mode)
6471 unsigned step, copies;
6473 if (mode == VOIDmode)
6474 mode = GET_MODE (op);
6475 else if (mode != GET_MODE (op))
6476 return false;
6478 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6479 constants. */
6480 if (mode == V2DFmode)
6481 return zero_constant (op, mode);
6483 else if (mode == V2DImode)
6485 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6486 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6487 return false;
6489 if (zero_constant (op, mode))
6490 return true;
6492 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6493 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6494 return true;
6496 return false;
6499 /* V1TImode is a special container for TImode. Ignore for now. */
6500 else if (mode == V1TImode)
6501 return false;
6503 /* Start with a vspltisw. */
6504 step = GET_MODE_NUNITS (mode) / 4;
6505 copies = 1;
6507 if (vspltis_constant (op, step, copies))
6508 return true;
6510 /* Then try with a vspltish. */
6511 if (step == 1)
6512 copies <<= 1;
6513 else
6514 step >>= 1;
6516 if (vspltis_constant (op, step, copies))
6517 return true;
6519 /* And finally a vspltisb. */
6520 if (step == 1)
6521 copies <<= 1;
6522 else
6523 step >>= 1;
6525 if (vspltis_constant (op, step, copies))
6526 return true;
6528 if (vspltis_shifted (op) != 0)
6529 return true;
6531 return false;
6534 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6535 result is OP. Abort if it is not possible. */
6537 rtx
6538 gen_easy_altivec_constant (rtx op)
6540 machine_mode mode = GET_MODE (op);
6541 int nunits = GET_MODE_NUNITS (mode);
6542 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6543 unsigned step = nunits / 4;
6544 unsigned copies = 1;
6546 /* Start with a vspltisw. */
6547 if (vspltis_constant (op, step, copies))
6548 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6550 /* Then try with a vspltish. */
6551 if (step == 1)
6552 copies <<= 1;
6553 else
6554 step >>= 1;
6556 if (vspltis_constant (op, step, copies))
6557 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6559 /* And finally a vspltisb. */
6560 if (step == 1)
6561 copies <<= 1;
6562 else
6563 step >>= 1;
6565 if (vspltis_constant (op, step, copies))
6566 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6568 gcc_unreachable ();
6571 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6572 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6574 Return the number of instructions needed (1 or 2) in the location pointed
6575 to by NUM_INSNS_PTR.
6577 Return the constant that is being split via CONSTANT_PTR. */
6579 bool
6580 xxspltib_constant_p (rtx op,
6581 machine_mode mode,
6582 int *num_insns_ptr,
6583 int *constant_ptr)
6585 size_t nunits = GET_MODE_NUNITS (mode);
6586 size_t i;
6587 HOST_WIDE_INT value;
6588 rtx element;
6590 /* Set the returned values to out of bound values. */
6591 *num_insns_ptr = -1;
6592 *constant_ptr = 256;
6594 if (!TARGET_P9_VECTOR)
6595 return false;
6597 if (mode == VOIDmode)
6598 mode = GET_MODE (op);
6600 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6601 return false;
6603 /* Handle (vec_duplicate <constant>). */
6604 if (GET_CODE (op) == VEC_DUPLICATE)
6606 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6607 && mode != V2DImode)
6608 return false;
6610 element = XEXP (op, 0);
6611 if (!CONST_INT_P (element))
6612 return false;
6614 value = INTVAL (element);
6615 if (!IN_RANGE (value, -128, 127))
6616 return false;
6619 /* Handle (const_vector [...]). */
6620 else if (GET_CODE (op) == CONST_VECTOR)
6622 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6623 && mode != V2DImode)
6624 return false;
6626 element = CONST_VECTOR_ELT (op, 0);
6627 if (!CONST_INT_P (element))
6628 return false;
6630 value = INTVAL (element);
6631 if (!IN_RANGE (value, -128, 127))
6632 return false;
6634 for (i = 1; i < nunits; i++)
6636 element = CONST_VECTOR_ELT (op, i);
6637 if (!CONST_INT_P (element))
6638 return false;
6640 if (value != INTVAL (element))
6641 return false;
6645 /* Handle integer constants being loaded into the upper part of the VSX
6646 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6647 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6648 else if (CONST_INT_P (op))
6650 if (!SCALAR_INT_MODE_P (mode))
6651 return false;
6653 value = INTVAL (op);
6654 if (!IN_RANGE (value, -128, 127))
6655 return false;
6657 if (!IN_RANGE (value, -1, 0))
6659 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6660 return false;
6662 if (EASY_VECTOR_15 (value))
6663 return false;
6667 else
6668 return false;
6670 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6671 sign extend. Special case 0/-1 to allow getting any VSX register instead
6672 of an Altivec register. */
6673 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6674 && EASY_VECTOR_15 (value))
6675 return false;
6677 /* Return # of instructions and the constant byte for XXSPLTIB. */
6678 if (mode == V16QImode)
6679 *num_insns_ptr = 1;
6681 else if (IN_RANGE (value, -1, 0))
6682 *num_insns_ptr = 1;
6684 else
6685 *num_insns_ptr = 2;
6687 *constant_ptr = (int) value;
6688 return true;
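/* Worked examples: a V16QImode splat of 66 takes a single
   "xxspltib 66"; a V8HImode splat of 100 takes 2 insns (xxspltib plus
   a sign extension such as vupkhsb); a V8HImode splat of 5 is rejected
   here because EASY_VECTOR_15 holds and a plain vspltish 5 is
   cheaper.  */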
6691 const char *
6692 output_vec_const_move (rtx *operands)
6694 int shift;
6695 machine_mode mode;
6696 rtx dest, vec;
6698 dest = operands[0];
6699 vec = operands[1];
6700 mode = GET_MODE (dest);
6702 if (TARGET_VSX)
6704 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6705 int xxspltib_value = 256;
6706 int num_insns = -1;
6708 if (zero_constant (vec, mode))
6710 if (TARGET_P9_VECTOR)
6711 return "xxspltib %x0,0";
6713 else if (dest_vmx_p)
6714 return "vspltisw %0,0";
6716 else
6717 return "xxlxor %x0,%x0,%x0";
6720 if (all_ones_constant (vec, mode))
6722 if (TARGET_P9_VECTOR)
6723 return "xxspltib %x0,255";
6725 else if (dest_vmx_p)
6726 return "vspltisw %0,-1";
6728 else if (TARGET_P8_VECTOR)
6729 return "xxlorc %x0,%x0,%x0";
6731 else
6732 gcc_unreachable ();
6735 if (TARGET_P9_VECTOR
6736 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6738 if (num_insns == 1)
6740 operands[2] = GEN_INT (xxspltib_value & 0xff);
6741 return "xxspltib %x0,%2";
6744 return "#";
6748 if (TARGET_ALTIVEC)
6750 rtx splat_vec;
6752 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6753 if (zero_constant (vec, mode))
6754 return "vspltisw %0,0";
6756 if (all_ones_constant (vec, mode))
6757 return "vspltisw %0,-1";
6759 /* Do we need to construct a value using VSLDOI? */
6760 shift = vspltis_shifted (vec);
6761 if (shift != 0)
6762 return "#";
6764 splat_vec = gen_easy_altivec_constant (vec);
6765 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6766 operands[1] = XEXP (splat_vec, 0);
6767 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6768 return "#";
6770 switch (GET_MODE (splat_vec))
6772 case E_V4SImode:
6773 return "vspltisw %0,%1";
6775 case E_V8HImode:
6776 return "vspltish %0,%1";
6778 case E_V16QImode:
6779 return "vspltisb %0,%1";
6781 default:
6782 gcc_unreachable ();
6786 gcc_unreachable ();
6789 /* Initialize the PAIRED vector TARGET to VALS.  */
6791 void
6792 paired_expand_vector_init (rtx target, rtx vals)
6794 machine_mode mode = GET_MODE (target);
6795 int n_elts = GET_MODE_NUNITS (mode);
6796 int n_var = 0;
6797 rtx x, new_rtx, tmp, constant_op, op1, op2;
6798 int i;
6800 for (i = 0; i < n_elts; ++i)
6802 x = XVECEXP (vals, 0, i);
6803 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6804 ++n_var;
6806 if (n_var == 0)
6808 /* Load from constant pool. */
6809 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6810 return;
6813 if (n_var == 2)
6815 /* The vector is initialized only with non-constants. */
6816 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6817 XVECEXP (vals, 0, 1));
6819 emit_move_insn (target, new_rtx);
6820 return;
6823 /* One field is non-constant and the other one is a constant. Load the
6824 constant from the constant pool and use the ps_merge instruction to
6825 construct the whole vector. */
6826 op1 = XVECEXP (vals, 0, 0);
6827 op2 = XVECEXP (vals, 0, 1);
6829 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6831 tmp = gen_reg_rtx (GET_MODE (constant_op));
6832 emit_move_insn (tmp, constant_op);
6834 if (CONSTANT_P (op1))
6835 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6836 else
6837 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6839 emit_move_insn (target, new_rtx);
6842 void
6843 paired_expand_vector_move (rtx operands[])
6845 rtx op0 = operands[0], op1 = operands[1];
6847 emit_move_insn (op0, op1);
6850 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6851 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6852 operands for the relation operation COND. This is a recursive
6853 function. */
6855 static void
6856 paired_emit_vector_compare (enum rtx_code rcode,
6857 rtx dest, rtx op0, rtx op1,
6858 rtx cc_op0, rtx cc_op1)
6860 rtx tmp = gen_reg_rtx (V2SFmode);
6861 rtx tmp1, max, min;
6863 gcc_assert (TARGET_PAIRED_FLOAT);
6864 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6866 switch (rcode)
6868 case LT:
6869 case LTU:
6870 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6871 return;
6872 case GE:
6873 case GEU:
6874 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6875 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6876 return;
6877 case LE:
6878 case LEU:
6879 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6880 return;
6881 case GT:
6882 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6883 return;
6884 case EQ:
6885 tmp1 = gen_reg_rtx (V2SFmode);
6886 max = gen_reg_rtx (V2SFmode);
6887 min = gen_reg_rtx (V2SFmode);
6888 gen_reg_rtx (V2SFmode);
6890 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6891 emit_insn (gen_selv2sf4
6892 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6893 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6894 emit_insn (gen_selv2sf4
6895 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6896 emit_insn (gen_subv2sf3 (tmp1, min, max));
6897 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6898 return;
6899 case NE:
6900 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6901 return;
6902 case UNLE:
6903 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6904 return;
6905 case UNLT:
6906 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6907 return;
6908 case UNGE:
6909 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6910 return;
6911 case UNGT:
6912 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6913 return;
6914 default:
6915 gcc_unreachable ();
6918 return;
6921 /* Emit vector conditional expression.
6922 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6923 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6925 int
6926 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6927 rtx cond, rtx cc_op0, rtx cc_op1)
6929 enum rtx_code rcode = GET_CODE (cond);
6931 if (!TARGET_PAIRED_FLOAT)
6932 return 0;
6934 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6936 return 1;
6939 /* Initialize vector TARGET to VALS. */
6941 void
6942 rs6000_expand_vector_init (rtx target, rtx vals)
6944 machine_mode mode = GET_MODE (target);
6945 machine_mode inner_mode = GET_MODE_INNER (mode);
6946 int n_elts = GET_MODE_NUNITS (mode);
6947 int n_var = 0, one_var = -1;
6948 bool all_same = true, all_const_zero = true;
6949 rtx x, mem;
6950 int i;
6952 for (i = 0; i < n_elts; ++i)
6954 x = XVECEXP (vals, 0, i);
6955 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6956 ++n_var, one_var = i;
6957 else if (x != CONST0_RTX (inner_mode))
6958 all_const_zero = false;
6960 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6961 all_same = false;
6964 if (n_var == 0)
6966 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6967 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6968 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6970 /* Zero register. */
6971 emit_move_insn (target, CONST0_RTX (mode));
6972 return;
6974 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6976 /* Splat immediate. */
6977 emit_insn (gen_rtx_SET (target, const_vec));
6978 return;
6980 else
6982 /* Load from constant pool. */
6983 emit_move_insn (target, const_vec);
6984 return;
6988 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6989 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6991 rtx op[2];
6992 size_t i;
6993 size_t num_elements = all_same ? 1 : 2;
6994 for (i = 0; i < num_elements; i++)
6996 op[i] = XVECEXP (vals, 0, i);
6997 /* Just in case there is a SUBREG with a smaller mode, do a
6998 conversion. */
6999 if (GET_MODE (op[i]) != inner_mode)
7001 rtx tmp = gen_reg_rtx (inner_mode);
7002 convert_move (tmp, op[i], 0);
7003 op[i] = tmp;
7005 /* Allow load with splat double word. */
7006 else if (MEM_P (op[i]))
7008 if (!all_same)
7009 op[i] = force_reg (inner_mode, op[i]);
7011 else if (!REG_P (op[i]))
7012 op[i] = force_reg (inner_mode, op[i]);
7015 if (all_same)
7017 if (mode == V2DFmode)
7018 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7019 else
7020 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7022 else
7024 if (mode == V2DFmode)
7025 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7026 else
7027 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7029 return;
7032 /* Special case initializing vector int if we are on 64-bit systems with
7033 direct move or we have the ISA 3.0 instructions. */
7034 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7035 && TARGET_DIRECT_MOVE_64BIT)
7037 if (all_same)
7039 rtx element0 = XVECEXP (vals, 0, 0);
7040 if (MEM_P (element0))
7041 element0 = rs6000_address_for_fpconvert (element0);
7042 else
7043 element0 = force_reg (SImode, element0);
7045 if (TARGET_P9_VECTOR)
7046 emit_insn (gen_vsx_splat_v4si (target, element0));
7047 else
7049 rtx tmp = gen_reg_rtx (DImode);
7050 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7051 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7053 return;
7055 else
7057 rtx elements[4];
7058 size_t i;
7060 for (i = 0; i < 4; i++)
7062 elements[i] = XVECEXP (vals, 0, i);
7063 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7064 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7067 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7068 elements[2], elements[3]));
7069 return;
7073 /* With single precision floating point on VSX, note that internally single
7074 precision is actually represented as a double, and either make 2 V2DF
7075 vectors, and convert these vectors to single precision, or do one
7076 conversion, and splat the result to the other elements. */
7077 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7079 if (all_same)
7081 rtx element0 = XVECEXP (vals, 0, 0);
7083 if (TARGET_P9_VECTOR)
7085 if (MEM_P (element0))
7086 element0 = rs6000_address_for_fpconvert (element0);
7088 emit_insn (gen_vsx_splat_v4sf (target, element0));
7091 else
7093 rtx freg = gen_reg_rtx (V4SFmode);
7094 rtx sreg = force_reg (SFmode, element0);
7095 rtx cvt = (TARGET_XSCVDPSPN
7096 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7097 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7099 emit_insn (cvt);
7100 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7101 const0_rtx));
7104 else
7106 rtx dbl_even = gen_reg_rtx (V2DFmode);
7107 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7108 rtx flt_even = gen_reg_rtx (V4SFmode);
7109 rtx flt_odd = gen_reg_rtx (V4SFmode);
7110 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7111 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7112 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7113 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7115 /* Use VMRGEW if we can instead of doing a permute. */
7116 if (TARGET_P8_VECTOR)
7118 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7119 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7120 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7121 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7122 if (BYTES_BIG_ENDIAN)
7123 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7124 else
7125 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7127 else
7129 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7130 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7131 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7132 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7133 rs6000_expand_extract_even (target, flt_even, flt_odd);
7136 return;
7139 /* Special case initializing vector short/char splats if we are on 64-bit
7140 systems with direct move. */
7141 if (all_same && TARGET_DIRECT_MOVE_64BIT
7142 && (mode == V16QImode || mode == V8HImode))
7144 rtx op0 = XVECEXP (vals, 0, 0);
7145 rtx di_tmp = gen_reg_rtx (DImode);
7147 if (!REG_P (op0))
7148 op0 = force_reg (GET_MODE_INNER (mode), op0);
7150 if (mode == V16QImode)
7152 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7153 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7154 return;
7157 if (mode == V8HImode)
7159 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7160 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7161 return;
7165 /* Store value to stack temp. Load vector element. Splat. However, splat
7166 of 64-bit items is not supported on Altivec. */
7167 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7169 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7170 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7171 XVECEXP (vals, 0, 0));
7172 x = gen_rtx_UNSPEC (VOIDmode,
7173 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7174 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7175 gen_rtvec (2,
7176 gen_rtx_SET (target, mem),
7177 x)));
7178 x = gen_rtx_VEC_SELECT (inner_mode, target,
7179 gen_rtx_PARALLEL (VOIDmode,
7180 gen_rtvec (1, const0_rtx)));
7181 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7182 return;
7185 /* One field is non-constant. Load constant then overwrite
7186 varying field. */
7187 if (n_var == 1)
7189 rtx copy = copy_rtx (vals);
7191 /* Load constant part of vector, substitute neighboring value for
7192 varying element. */
7193 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7194 rs6000_expand_vector_init (target, copy);
7196 /* Insert variable. */
7197 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7198 return;
7201 /* Construct the vector in memory one field at a time
7202 and load the whole vector. */
7203 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7204 for (i = 0; i < n_elts; i++)
7205 emit_move_insn (adjust_address_nv (mem, inner_mode,
7206 i * GET_MODE_SIZE (inner_mode)),
7207 XVECEXP (vals, 0, i));
7208 emit_move_insn (target, mem);
7211 /* Set field ELT of TARGET to VAL. */
7213 void
7214 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7216 machine_mode mode = GET_MODE (target);
7217 machine_mode inner_mode = GET_MODE_INNER (mode);
7218 rtx reg = gen_reg_rtx (mode);
7219 rtx mask, mem, x;
7220 int width = GET_MODE_SIZE (inner_mode);
7221 int i;
7223 val = force_reg (GET_MODE (val), val);
7225 if (VECTOR_MEM_VSX_P (mode))
7227 rtx insn = NULL_RTX;
7228 rtx elt_rtx = GEN_INT (elt);
7230 if (mode == V2DFmode)
7231 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7233 else if (mode == V2DImode)
7234 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7236 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7238 if (mode == V4SImode)
7239 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7240 else if (mode == V8HImode)
7241 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7242 else if (mode == V16QImode)
7243 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7244 else if (mode == V4SFmode)
7245 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7248 if (insn)
7250 emit_insn (insn);
7251 return;
7255 /* Simplify setting single element vectors like V1TImode. */
7256 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7258 emit_move_insn (target, gen_lowpart (mode, val));
7259 return;
7262 /* Load single variable value. */
7263 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7264 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7265 x = gen_rtx_UNSPEC (VOIDmode,
7266 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7267 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7268 gen_rtvec (2,
7269 gen_rtx_SET (reg, mem),
7270 x)));
7272 /* Linear sequence. */
7273 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7274 for (i = 0; i < 16; ++i)
7275 XVECEXP (mask, 0, i) = GEN_INT (i);
7277 /* Set permute mask to insert element into target. */
7278 for (i = 0; i < width; ++i)
7279 XVECEXP (mask, 0, elt*width + i)
7280 = GEN_INT (i + 0x10);
7281 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
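/* Worked example of the selector just built: inserting element 2 of a V4SI
   (width 4) gives { 0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15 }.  Selector
   values 0-15 pick bytes of the first permute input (TARGET) and values
   16-31 pick bytes of the second (REG, which holds the new value).  */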
7283 if (BYTES_BIG_ENDIAN)
7284 x = gen_rtx_UNSPEC (mode,
7285 gen_rtvec (3, target, reg,
7286 force_reg (V16QImode, x)),
7287 UNSPEC_VPERM);
7288 else
7290 if (TARGET_P9_VECTOR)
7291 x = gen_rtx_UNSPEC (mode,
7292 gen_rtvec (3, reg, target,
7293 force_reg (V16QImode, x)),
7294 UNSPEC_VPERMR);
7295 else
7297 /* Invert selector. We prefer to generate VNAND on P8 so
7298 that future fusion opportunities can kick in, but must
7299 generate VNOR elsewhere. */
7300 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7301 rtx iorx = (TARGET_P8_VECTOR
7302 ? gen_rtx_IOR (V16QImode, notx, notx)
7303 : gen_rtx_AND (V16QImode, notx, notx));
7304 rtx tmp = gen_reg_rtx (V16QImode);
7305 emit_insn (gen_rtx_SET (tmp, iorx));
7307 /* Permute with operands reversed and adjusted selector. */
7308 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7309 UNSPEC_VPERM);
7313 emit_insn (gen_rtx_SET (target, x));
7316 /* Extract field ELT from VEC into TARGET. */
7318 void
7319 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7321 machine_mode mode = GET_MODE (vec);
7322 machine_mode inner_mode = GET_MODE_INNER (mode);
7323 rtx mem;
7325 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7327 switch (mode)
7329 default:
7330 break;
7331 case E_V1TImode:
7332 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7333 emit_move_insn (target, gen_lowpart (TImode, vec));
7334 break;
7335 case E_V2DFmode:
7336 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7337 return;
7338 case E_V2DImode:
7339 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7340 return;
7341 case E_V4SFmode:
7342 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7343 return;
7344 case E_V16QImode:
7345 if (TARGET_DIRECT_MOVE_64BIT)
7347 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7348 return;
7350 else
7351 break;
7352 case E_V8HImode:
7353 if (TARGET_DIRECT_MOVE_64BIT)
7355 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7356 return;
7358 else
7359 break;
7360 case E_V4SImode:
7361 if (TARGET_DIRECT_MOVE_64BIT)
7363 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7364 return;
7366 break;
7369 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7370 && TARGET_DIRECT_MOVE_64BIT)
7372 if (GET_MODE (elt) != DImode)
7374 rtx tmp = gen_reg_rtx (DImode);
7375 convert_move (tmp, elt, 0);
7376 elt = tmp;
7378 else if (!REG_P (elt))
7379 elt = force_reg (DImode, elt);
7381 switch (mode)
7383 case E_V2DFmode:
7384 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7385 return;
7387 case E_V2DImode:
7388 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7389 return;
7391 case E_V4SFmode:
7392 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7393 return;
7395 case E_V4SImode:
7396 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7397 return;
7399 case E_V8HImode:
7400 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7401 return;
7403 case E_V16QImode:
7404 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7405 return;
7407 default:
7408 gcc_unreachable ();
7412 gcc_assert (CONST_INT_P (elt));
7414 /* Allocate mode-sized buffer. */
7415 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7417 emit_move_insn (mem, vec);
7419 /* Add offset to field within buffer matching vector element. */
7420 mem = adjust_address_nv (mem, inner_mode,
7421 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7423 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7426 /* Helper function to return the register number of an RTX. */
7427 static inline int
7428 regno_or_subregno (rtx op)
7430 if (REG_P (op))
7431 return REGNO (op);
7432 else if (SUBREG_P (op))
7433 return subreg_regno (op);
7434 else
7435 gcc_unreachable ();
7438 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7439 within the vector (ELEMENT) with a mode (SCALAR_MODE).  Use a base register
7440 temporary (BASE_TMP) to fix up the address.  Return the new memory address
7441 that is valid for reads or writes to a given register (SCALAR_REG). */
7443 rtx
7444 rs6000_adjust_vec_address (rtx scalar_reg,
7445 rtx mem,
7446 rtx element,
7447 rtx base_tmp,
7448 machine_mode scalar_mode)
7450 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7451 rtx addr = XEXP (mem, 0);
7452 rtx element_offset;
7453 rtx new_addr;
7454 bool valid_addr_p;
7456 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7457 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7459 /* Calculate what we need to add to the address to get the element
7460 address. */
7461 if (CONST_INT_P (element))
7462 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7463 else
7465 int byte_shift = exact_log2 (scalar_size);
7466 gcc_assert (byte_shift >= 0);
7468 if (byte_shift == 0)
7469 element_offset = element;
7471 else
7473 if (TARGET_POWERPC64)
7474 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7475 else
7476 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7478 element_offset = base_tmp;
7482 /* Create the new address pointing to the element within the vector. If we
7483 are adding 0, we don't have to change the address. */
7484 if (element_offset == const0_rtx)
7485 new_addr = addr;
7487 /* A simple indirect address can be converted into a reg + offset
7488 address. */
7489 else if (REG_P (addr) || SUBREG_P (addr))
7490 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7492 /* Optimize D-FORM addresses that have a constant offset and a constant
7493 element number, folding the element offset into the address directly. */
7494 else if (GET_CODE (addr) == PLUS)
7496 rtx op0 = XEXP (addr, 0);
7497 rtx op1 = XEXP (addr, 1);
7498 rtx insn;
7500 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7501 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7503 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7504 rtx offset_rtx = GEN_INT (offset);
7506 if (IN_RANGE (offset, -32768, 32767)
7507 && (scalar_size < 8 || (offset & 0x3) == 0))
7508 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7509 else
7511 emit_move_insn (base_tmp, offset_rtx);
7512 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7515 else
7517 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7518 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7520 /* Note, ADDI requires the register being added to be a base
7521 register. If the register was R0, load it up into the temporary
7522 and do the add. */
7523 if (op1_reg_p
7524 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7526 insn = gen_add3_insn (base_tmp, op1, element_offset);
7527 gcc_assert (insn != NULL_RTX);
7528 emit_insn (insn);
7531 else if (ele_reg_p
7532 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7534 insn = gen_add3_insn (base_tmp, element_offset, op1);
7535 gcc_assert (insn != NULL_RTX);
7536 emit_insn (insn);
7539 else
7541 emit_move_insn (base_tmp, op1);
7542 emit_insn (gen_add2_insn (base_tmp, element_offset));
7545 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7549 else
7551 emit_move_insn (base_tmp, addr);
7552 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7555 /* If we have a PLUS, we need to see whether the particular register class
7556 allows for D-FORM or X-FORM addressing. */
7557 if (GET_CODE (new_addr) == PLUS)
7559 rtx op1 = XEXP (new_addr, 1);
7560 addr_mask_type addr_mask;
7561 int scalar_regno = regno_or_subregno (scalar_reg);
7563 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7564 if (INT_REGNO_P (scalar_regno))
7565 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7567 else if (FP_REGNO_P (scalar_regno))
7568 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7570 else if (ALTIVEC_REGNO_P (scalar_regno))
7571 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7573 else
7574 gcc_unreachable ();
7576 if (REG_P (op1) || SUBREG_P (op1))
7577 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7578 else
7579 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7582 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7583 valid_addr_p = true;
7585 else
7586 valid_addr_p = false;
7588 if (!valid_addr_p)
7590 emit_move_insn (base_tmp, new_addr);
7591 new_addr = base_tmp;
7594 return change_address (mem, scalar_mode, new_addr);
7597 /* Split a variable vec_extract operation into the component instructions. */
7599 void
7600 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7601 rtx tmp_altivec)
7603 machine_mode mode = GET_MODE (src);
7604 machine_mode scalar_mode = GET_MODE (dest);
7605 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7606 int byte_shift = exact_log2 (scalar_size);
7608 gcc_assert (byte_shift >= 0);
7610 /* If we are given a memory address, optimize to load just the element. We
7611 don't have to adjust the vector element number on little endian
7612 systems. */
7613 if (MEM_P (src))
7615 gcc_assert (REG_P (tmp_gpr));
7616 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7617 tmp_gpr, scalar_mode));
7618 return;
7621 else if (REG_P (src) || SUBREG_P (src))
7623 int bit_shift = byte_shift + 3;
7624 rtx element2;
7625 int dest_regno = regno_or_subregno (dest);
7626 int src_regno = regno_or_subregno (src);
7627 int element_regno = regno_or_subregno (element);
7629 gcc_assert (REG_P (tmp_gpr));
7631 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7632 a general purpose register. */
7633 if (TARGET_P9_VECTOR
7634 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7635 && INT_REGNO_P (dest_regno)
7636 && ALTIVEC_REGNO_P (src_regno)
7637 && INT_REGNO_P (element_regno))
7639 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7640 rtx element_si = gen_rtx_REG (SImode, element_regno);
7642 if (mode == V16QImode)
7643 emit_insn (VECTOR_ELT_ORDER_BIG
7644 ? gen_vextublx (dest_si, element_si, src)
7645 : gen_vextubrx (dest_si, element_si, src));
7647 else if (mode == V8HImode)
7649 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7650 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7651 emit_insn (VECTOR_ELT_ORDER_BIG
7652 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7653 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7657 else
7659 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7660 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7661 emit_insn (VECTOR_ELT_ORDER_BIG
7662 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7663 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7666 return;
7670 gcc_assert (REG_P (tmp_altivec));
7672 /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
7673 an XOR, otherwise we need to subtract.  The shift amount is chosen so
7674 that VSLO will shift the element into the upper position (adding 3
7675 converts a byte shift into a bit shift). */
7676 if (scalar_size == 8)
7678 if (!VECTOR_ELT_ORDER_BIG)
7680 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7681 element2 = tmp_gpr;
7683 else
7684 element2 = element;
7686 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7687 bit. */
7688 emit_insn (gen_rtx_SET (tmp_gpr,
7689 gen_rtx_AND (DImode,
7690 gen_rtx_ASHIFT (DImode,
7691 element2,
7692 GEN_INT (6)),
7693 GEN_INT (64))));
7695 else
7697 if (!VECTOR_ELT_ORDER_BIG)
7699 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7701 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7702 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7703 element2 = tmp_gpr;
7705 else
7706 element2 = element;
7708 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
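/* Worked example: extracting element 1 of a V4SI on big endian gives
   bit_shift = 2 + 3 = 5, so tmp_gpr = 1 << 5 = 32, i.e. VSLO moves the
   element by 4 bytes.  For V2DI the RLDIC form above yields 0 or 64
   directly, since the (possibly XOR-adjusted) element number is 0 or 1.  */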
7711 /* Get the value into the lower byte of the Altivec register where VSLO
7712 expects it. */
7713 if (TARGET_P9_VECTOR)
7714 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7715 else if (can_create_pseudo_p ())
7716 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7717 else
7719 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7720 emit_move_insn (tmp_di, tmp_gpr);
7721 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7724 /* Do the VSLO to get the value into the final location. */
7725 switch (mode)
7727 case E_V2DFmode:
7728 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7729 return;
7731 case E_V2DImode:
7732 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7733 return;
7735 case E_V4SFmode:
7737 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7738 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7739 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7740 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7741 tmp_altivec));
7743 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7744 return;
7747 case E_V4SImode:
7748 case E_V8HImode:
7749 case E_V16QImode:
7751 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7752 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7753 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7754 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7755 tmp_altivec));
7756 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
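/* The element now occupies the most significant bits of the doubleword;
   e.g. for V8HImode (scalar_size == 2) the shift below is 64 - 16 = 48,
   which brings the halfword down into the low bits, sign-extended.  */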
7757 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7758 GEN_INT (64 - (8 * scalar_size))));
7759 return;
7762 default:
7763 gcc_unreachable ();
7766 return;
7768 else
7769 gcc_unreachable ();
7772 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7773 two SImode values. */
7775 static void
7776 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7778 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7780 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7782 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7783 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7785 emit_move_insn (dest, GEN_INT (const1 | const2));
7786 return;
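/* Worked example for the constant/constant path just above: si1 == 0x1234
   and si2 == 0x5678 collapse to the single constant 0x0000123400005678,
   loaded with one move.  */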
7789 /* Put si1 into the upper 32 bits of dest. */
7790 if (CONST_INT_P (si1))
7791 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7792 else
7794 /* Generate RLDIC. */
7795 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7796 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7797 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7798 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7799 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7800 emit_insn (gen_rtx_SET (dest, and_rtx));
7803 /* Put si2 into the temporary. */
7804 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7805 if (CONST_INT_P (si2))
7806 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7807 else
7808 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7810 /* Combine the two parts. */
7811 emit_insn (gen_iordi3 (dest, dest, tmp));
7812 return;
7815 /* Split a V4SI initialization. */
7817 void
7818 rs6000_split_v4si_init (rtx operands[])
7820 rtx dest = operands[0];
7822 /* Destination is a GPR, build up the two DImode parts in place. */
7823 if (REG_P (dest) || SUBREG_P (dest))
7825 int d_regno = regno_or_subregno (dest);
7826 rtx scalar1 = operands[1];
7827 rtx scalar2 = operands[2];
7828 rtx scalar3 = operands[3];
7829 rtx scalar4 = operands[4];
7830 rtx tmp1 = operands[5];
7831 rtx tmp2 = operands[6];
7833 /* Even though we only need one temporary (plus the destination, which
7834 has an early clobber constraint), try to use two temporaries, one for
7835 each double word created.  That way the second insn scheduling pass can
7836 rearrange things so the two parts are done in parallel. */
7837 if (BYTES_BIG_ENDIAN)
7839 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7840 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7841 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7842 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7844 else
7846 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7847 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7848 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7849 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7850 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7852 return;
7855 else
7856 gcc_unreachable ();
7859 /* Return the alignment of TYPE.  The existing alignment is ALIGN.  HOW
7860 selects whether the alignment is ABI-mandated, optional, or both
7861 ABI and optional alignment. */
7863 unsigned int
7864 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7866 if (how != align_opt)
7868 if (TREE_CODE (type) == VECTOR_TYPE)
7870 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7872 if (align < 64)
7873 align = 64;
7875 else if (align < 128)
7876 align = 128;
7880 if (how != align_abi)
7882 if (TREE_CODE (type) == ARRAY_TYPE
7883 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7885 if (align < BITS_PER_WORD)
7886 align = BITS_PER_WORD;
7890 return align;
7893 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7894 instructions simply ignore the low bits; VSX memory instructions
7895 are aligned to 4 or 8 bytes. */
7897 static bool
7898 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7900 return (STRICT_ALIGNMENT
7901 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7902 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7903 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7904 && (int) align < VECTOR_ALIGN (mode)))));
7907 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7909 bool
7910 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7912 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7914 if (computed != 128)
7916 static bool warned;
7917 if (!warned && warn_psabi)
7919 warned = true;
7920 inform (input_location,
7921 "the layout of aggregates containing vectors with"
7922 " %d-byte alignment has changed in GCC 5",
7923 computed / BITS_PER_UNIT);
7926 /* In current GCC there is no special case. */
7927 return false;
7930 return false;
7933 /* AIX increases natural record alignment to doubleword if the first
7934 field is an FP double while the FP fields remain word aligned. */
7936 unsigned int
7937 rs6000_special_round_type_align (tree type, unsigned int computed,
7938 unsigned int specified)
7940 unsigned int align = MAX (computed, specified);
7941 tree field = TYPE_FIELDS (type);
7943 /* Skip all non-field DECLs. */
7944 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7945 field = DECL_CHAIN (field);
7947 if (field != NULL && field != type)
7949 type = TREE_TYPE (field);
7950 while (TREE_CODE (type) == ARRAY_TYPE)
7951 type = TREE_TYPE (type);
7953 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7954 align = MAX (align, 64);
7957 return align;
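/* For example, on AIX

     struct s { double d; int i; };

   is given doubleword (64-bit) alignment because its first field is a
   double, even though the double member itself stays word aligned within
   the record.  */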
7960 /* Darwin increases record alignment to the natural alignment of
7961 the first field. */
7963 unsigned int
7964 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7965 unsigned int specified)
7967 unsigned int align = MAX (computed, specified);
7969 if (TYPE_PACKED (type))
7970 return align;
7972 /* Find the first field, looking down into aggregates. */
7973 do {
7974 tree field = TYPE_FIELDS (type);
7975 /* Skip all non-field DECLs. */
7976 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7977 field = DECL_CHAIN (field);
7978 if (! field)
7979 break;
7980 /* A packed field does not contribute any extra alignment. */
7981 if (DECL_PACKED (field))
7982 return align;
7983 type = TREE_TYPE (field);
7984 while (TREE_CODE (type) == ARRAY_TYPE)
7985 type = TREE_TYPE (type);
7986 } while (AGGREGATE_TYPE_P (type));
7988 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7989 align = MAX (align, TYPE_ALIGN (type));
7991 return align;
7994 /* Return 1 for an operand in small memory on V.4/eabi. */
7996 int
7997 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7998 machine_mode mode ATTRIBUTE_UNUSED)
8000 #if TARGET_ELF
8001 rtx sym_ref;
8003 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8004 return 0;
8006 if (DEFAULT_ABI != ABI_V4)
8007 return 0;
8009 if (GET_CODE (op) == SYMBOL_REF)
8010 sym_ref = op;
8012 else if (GET_CODE (op) != CONST
8013 || GET_CODE (XEXP (op, 0)) != PLUS
8014 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8015 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8016 return 0;
8018 else
8020 rtx sum = XEXP (op, 0);
8021 HOST_WIDE_INT summand;
8023 /* We have to be careful here, because it is the referenced address
8024 that must be 32k from _SDA_BASE_, not just the symbol. */
8025 summand = INTVAL (XEXP (sum, 1));
8026 if (summand < 0 || summand > g_switch_value)
8027 return 0;
8029 sym_ref = XEXP (sum, 0);
8032 return SYMBOL_REF_SMALL_P (sym_ref);
8033 #else
8034 return 0;
8035 #endif
8038 /* Return true if either operand is a general purpose register. */
8040 bool
8041 gpr_or_gpr_p (rtx op0, rtx op1)
8043 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8044 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8047 /* Return true if this is a direct move operation between GPR registers and
8048 floating point/VSX registers. */
8050 bool
8051 direct_move_p (rtx op0, rtx op1)
8053 int regno0, regno1;
8055 if (!REG_P (op0) || !REG_P (op1))
8056 return false;
8058 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8059 return false;
8061 regno0 = REGNO (op0);
8062 regno1 = REGNO (op1);
8063 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8064 return false;
8066 if (INT_REGNO_P (regno0))
8067 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8069 else if (INT_REGNO_P (regno1))
8071 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8072 return true;
8074 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8075 return true;
8078 return false;
8081 /* Return true if the OFFSET is valid for the quad address instructions that
8082 use d-form (register + offset) addressing. */
8084 static inline bool
8085 quad_address_offset_p (HOST_WIDE_INT offset)
8087 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
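/* E.g. offsets 0, 16 and -32768 are accepted, while 8 fails the low-nibble
   test and 32768 falls outside the signed 16-bit displacement encodable by
   the DQ-form instructions.  */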
8090 /* Return true if ADDR is an acceptable address for a quad memory
8091 operation of mode MODE (either LQ/STQ for general purpose registers, or
8092 LXV/STXV for vector registers under ISA 3.0).  STRICT is true if the
8093 base register must pass the strict form of INT_REG_OK_FOR_BASE_P. */
8096 bool
8097 quad_address_p (rtx addr, machine_mode mode, bool strict)
8099 rtx op0, op1;
8101 if (GET_MODE_SIZE (mode) != 16)
8102 return false;
8104 if (legitimate_indirect_address_p (addr, strict))
8105 return true;
8107 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8108 return false;
8110 if (GET_CODE (addr) != PLUS)
8111 return false;
8113 op0 = XEXP (addr, 0);
8114 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8115 return false;
8117 op1 = XEXP (addr, 1);
8118 if (!CONST_INT_P (op1))
8119 return false;
8121 return quad_address_offset_p (INTVAL (op1));
8124 /* Return true if this is a load or store quad operation. This function does
8125 not handle the atomic quad memory instructions. */
8127 bool
8128 quad_load_store_p (rtx op0, rtx op1)
8130 bool ret;
8132 if (!TARGET_QUAD_MEMORY)
8133 ret = false;
8135 else if (REG_P (op0) && MEM_P (op1))
8136 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8137 && quad_memory_operand (op1, GET_MODE (op1))
8138 && !reg_overlap_mentioned_p (op0, op1));
8140 else if (MEM_P (op0) && REG_P (op1))
8141 ret = (quad_memory_operand (op0, GET_MODE (op0))
8142 && quad_int_reg_operand (op1, GET_MODE (op1)));
8144 else
8145 ret = false;
8147 if (TARGET_DEBUG_ADDR)
8149 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8150 ret ? "true" : "false");
8151 debug_rtx (gen_rtx_SET (op0, op1));
8154 return ret;
8157 /* Given an address, return a constant offset term if one exists. */
8159 static rtx
8160 address_offset (rtx op)
8162 if (GET_CODE (op) == PRE_INC
8163 || GET_CODE (op) == PRE_DEC)
8164 op = XEXP (op, 0);
8165 else if (GET_CODE (op) == PRE_MODIFY
8166 || GET_CODE (op) == LO_SUM)
8167 op = XEXP (op, 1);
8169 if (GET_CODE (op) == CONST)
8170 op = XEXP (op, 0);
8172 if (GET_CODE (op) == PLUS)
8173 op = XEXP (op, 1);
8175 if (CONST_INT_P (op))
8176 return op;
8178 return NULL_RTX;
8181 /* Return true if the MEM operand is a memory operand suitable for use
8182 with a (full width, possibly multiple) gpr load/store. On
8183 powerpc64 this means the offset must be divisible by 4.
8184 Implements 'Y' constraint.
8186 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8187 a constraint function we know the operand has satisfied a suitable
8188 memory predicate. Also accept some odd rtl generated by reload
8189 (see rs6000_legitimize_reload_address for various forms). It is
8190 important that reload rtl be accepted by appropriate constraints
8191 but not by the operand predicate.
8193 Offsetting a lo_sum should not be allowed, except where we know by
8194 alignment that a 32k boundary is not crossed, but see the ???
8195 comment in rs6000_legitimize_reload_address. Note that by
8196 "offsetting" here we mean a further offset to access parts of the
8197 MEM. It's fine to have a lo_sum where the inner address is offset
8198 from a sym, since the same sym+offset will appear in the high part
8199 of the address calculation. */
8201 bool
8202 mem_operand_gpr (rtx op, machine_mode mode)
8204 unsigned HOST_WIDE_INT offset;
8205 int extra;
8206 rtx addr = XEXP (op, 0);
8208 op = address_offset (addr);
8209 if (op == NULL_RTX)
8210 return true;
8212 offset = INTVAL (op);
8213 if (TARGET_POWERPC64 && (offset & 3) != 0)
8214 return false;
8216 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8217 if (extra < 0)
8218 extra = 0;
8220 if (GET_CODE (addr) == LO_SUM)
8221 /* For lo_sum addresses, we must allow any offset except one that
8222 causes a wrap, so test only the low 16 bits. */
8223 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8225 return offset + 0x8000 < 0x10000u - extra;
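/* Worked example: for TImode on powerpc64, extra == 8, so the unsigned
   comparison accepts offsets in [-0x8000, 0x7ff7] (further restricted to
   multiples of 4 by the check above), guaranteeing that the second
   doubleword at offset + 8 still fits in a 16-bit displacement.  */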
8228 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8229 enforce an offset divisible by 4 even for 32-bit. */
8231 bool
8232 mem_operand_ds_form (rtx op, machine_mode mode)
8234 unsigned HOST_WIDE_INT offset;
8235 int extra;
8236 rtx addr = XEXP (op, 0);
8238 if (!offsettable_address_p (false, mode, addr))
8239 return false;
8241 op = address_offset (addr);
8242 if (op == NULL_RTX)
8243 return true;
8245 offset = INTVAL (op);
8246 if ((offset & 3) != 0)
8247 return false;
8249 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8250 if (extra < 0)
8251 extra = 0;
8253 if (GET_CODE (addr) == LO_SUM)
8254 /* For lo_sum addresses, we must allow any offset except one that
8255 causes a wrap, so test only the low 16 bits. */
8256 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8258 return offset + 0x8000 < 0x10000u - extra;
8261 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8263 static bool
8264 reg_offset_addressing_ok_p (machine_mode mode)
8266 switch (mode)
8268 case E_V16QImode:
8269 case E_V8HImode:
8270 case E_V4SFmode:
8271 case E_V4SImode:
8272 case E_V2DFmode:
8273 case E_V2DImode:
8274 case E_V1TImode:
8275 case E_TImode:
8276 case E_TFmode:
8277 case E_KFmode:
8278 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8279 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8280 a vector mode, if we want to use the VSX registers to move it around,
8281 we need to restrict ourselves to reg+reg addressing. Similarly for
8282 IEEE 128-bit floating point that is passed in a single vector
8283 register. */
8284 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8285 return mode_supports_vsx_dform_quad (mode);
8286 break;
8288 case E_V2SImode:
8289 case E_V2SFmode:
8290 /* Paired vector modes. Only reg+reg addressing is valid. */
8291 if (TARGET_PAIRED_FLOAT)
8292 return false;
8293 break;
8295 case E_SDmode:
8296 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8297 addressing for the LFIWZX and STFIWX instructions. */
8298 if (TARGET_NO_SDMODE_STACK)
8299 return false;
8300 break;
8302 default:
8303 break;
8306 return true;
8309 static bool
8310 virtual_stack_registers_memory_p (rtx op)
8312 int regnum;
8314 if (GET_CODE (op) == REG)
8315 regnum = REGNO (op);
8317 else if (GET_CODE (op) == PLUS
8318 && GET_CODE (XEXP (op, 0)) == REG
8319 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8320 regnum = REGNO (XEXP (op, 0));
8322 else
8323 return false;
8325 return (regnum >= FIRST_VIRTUAL_REGISTER
8326 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8329 /* Return true if a MODE-sized memory access to OP plus OFFSET
8330 is known not to straddle a 32k boundary.  This function is used
8331 to determine whether -mcmodel=medium code can use TOC pointer
8332 relative addressing for OP. This means the alignment of the TOC
8333 pointer must also be taken into account, and unfortunately that is
8334 only 8 bytes. */
8336 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8337 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8338 #endif
8340 static bool
8341 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8342 machine_mode mode)
8344 tree decl;
8345 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8347 if (GET_CODE (op) != SYMBOL_REF)
8348 return false;
8350 /* ISA 3.0 vector d-form addressing is restricted; don't allow
8351 SYMBOL_REF. */
8352 if (mode_supports_vsx_dform_quad (mode))
8353 return false;
8355 dsize = GET_MODE_SIZE (mode);
8356 decl = SYMBOL_REF_DECL (op);
8357 if (!decl)
8359 if (dsize == 0)
8360 return false;
8362 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8363 replacing memory addresses with an anchor plus offset. We
8364 could find the decl by rummaging around in the block->objects
8365 VEC for the given offset but that seems like too much work. */
8366 dalign = BITS_PER_UNIT;
8367 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8368 && SYMBOL_REF_ANCHOR_P (op)
8369 && SYMBOL_REF_BLOCK (op) != NULL)
8371 struct object_block *block = SYMBOL_REF_BLOCK (op);
8373 dalign = block->alignment;
8374 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8376 else if (CONSTANT_POOL_ADDRESS_P (op))
8378 /* It would be nice to have get_pool_align()... */
8379 machine_mode cmode = get_pool_mode (op);
8381 dalign = GET_MODE_ALIGNMENT (cmode);
8384 else if (DECL_P (decl))
8386 dalign = DECL_ALIGN (decl);
8388 if (dsize == 0)
8390 /* Allow BLKmode when the entire object is known to not
8391 cross a 32k boundary. */
8392 if (!DECL_SIZE_UNIT (decl))
8393 return false;
8395 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8396 return false;
8398 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8399 if (dsize > 32768)
8400 return false;
8402 dalign /= BITS_PER_UNIT;
8403 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8404 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8405 return dalign >= dsize;
8408 else
8409 gcc_unreachable ();
8411 /* Find how many bits of the alignment we know for this access. */
8412 dalign /= BITS_PER_UNIT;
8413 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8414 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8415 mask = dalign - 1;
8416 lsb = offset & -offset;
8417 mask &= lsb - 1;
8418 dalign = mask + 1;
8420 return dalign >= dsize;
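/* Worked example: with dalign == 8 bytes and offset == 20, lsb == 4 and
   mask == (8 - 1) & (4 - 1) == 3, so only 4-byte alignment is known at
   op + offset and the access qualifies only when dsize <= 4.  */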
8423 static bool
8424 constant_pool_expr_p (rtx op)
8426 rtx base, offset;
8428 split_const (op, &base, &offset);
8429 return (GET_CODE (base) == SYMBOL_REF
8430 && CONSTANT_POOL_ADDRESS_P (base)
8431 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8434 /* These are only used to pass through from print_operand/print_operand_address
8435 to rs6000_output_addr_const_extra over the intervening function
8436 output_addr_const which is not target code. */
8437 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8439 /* Return true if OP is a toc pointer relative address (the output
8440 of create_TOC_reference). If STRICT, do not match non-split
8441 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8442 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8443 TOCREL_OFFSET_RET respectively. */
8445 bool
8446 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8447 const_rtx *tocrel_offset_ret)
8449 if (!TARGET_TOC)
8450 return false;
8452 if (TARGET_CMODEL != CMODEL_SMALL)
8454 /* When strict, ensure we have everything tidy. */
8455 if (strict
8456 && !(GET_CODE (op) == LO_SUM
8457 && REG_P (XEXP (op, 0))
8458 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8459 return false;
8461 /* When not strict, allow non-split TOC addresses and also allow
8462 (lo_sum (high ..)) TOC addresses created during reload. */
8463 if (GET_CODE (op) == LO_SUM)
8464 op = XEXP (op, 1);
8467 const_rtx tocrel_base = op;
8468 const_rtx tocrel_offset = const0_rtx;
8470 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8472 tocrel_base = XEXP (op, 0);
8473 tocrel_offset = XEXP (op, 1);
8476 if (tocrel_base_ret)
8477 *tocrel_base_ret = tocrel_base;
8478 if (tocrel_offset_ret)
8479 *tocrel_offset_ret = tocrel_offset;
8481 return (GET_CODE (tocrel_base) == UNSPEC
8482 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8485 /* Return true if X is a constant pool address, and also for cmodel=medium
8486 if X is a toc-relative address known to be offsettable within MODE. */
8488 bool
8489 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8490 bool strict)
8492 const_rtx tocrel_base, tocrel_offset;
8493 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8494 && (TARGET_CMODEL != CMODEL_MEDIUM
8495 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8496 || mode == QImode
8497 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8498 INTVAL (tocrel_offset), mode)));
8501 static bool
8502 legitimate_small_data_p (machine_mode mode, rtx x)
8504 return (DEFAULT_ABI == ABI_V4
8505 && !flag_pic && !TARGET_TOC
8506 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8507 && small_data_operand (x, mode));
8510 bool
8511 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8512 bool strict, bool worst_case)
8514 unsigned HOST_WIDE_INT offset;
8515 unsigned int extra;
8517 if (GET_CODE (x) != PLUS)
8518 return false;
8519 if (!REG_P (XEXP (x, 0)))
8520 return false;
8521 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8522 return false;
8523 if (mode_supports_vsx_dform_quad (mode))
8524 return quad_address_p (x, mode, strict);
8525 if (!reg_offset_addressing_ok_p (mode))
8526 return virtual_stack_registers_memory_p (x);
8527 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8528 return true;
8529 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8530 return false;
8532 offset = INTVAL (XEXP (x, 1));
8533 extra = 0;
8534 switch (mode)
8536 case E_V2SImode:
8537 case E_V2SFmode:
8538 /* Paired single modes: offset addressing isn't valid. */
8539 return false;
8541 case E_DFmode:
8542 case E_DDmode:
8543 case E_DImode:
8544 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8545 addressing. */
8546 if (VECTOR_MEM_VSX_P (mode))
8547 return false;
8549 if (!worst_case)
8550 break;
8551 if (!TARGET_POWERPC64)
8552 extra = 4;
8553 else if (offset & 3)
8554 return false;
8555 break;
8557 case E_TFmode:
8558 case E_IFmode:
8559 case E_KFmode:
8560 case E_TDmode:
8561 case E_TImode:
8562 case E_PTImode:
8563 extra = 8;
8564 if (!worst_case)
8565 break;
8566 if (!TARGET_POWERPC64)
8567 extra = 12;
8568 else if (offset & 3)
8569 return false;
8570 break;
8572 default:
8573 break;
8576 offset += 0x8000;
8577 return offset < 0x10000 - extra;
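/* Worked example: a 32-bit DFmode access with worst_case set has
   extra == 4, so offsets up to 0x7ffb are accepted, leaving room for the
   access to the second word at offset + 4 within the 16-bit
   displacement.  */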
8580 bool
8581 legitimate_indexed_address_p (rtx x, int strict)
8583 rtx op0, op1;
8585 if (GET_CODE (x) != PLUS)
8586 return false;
8588 op0 = XEXP (x, 0);
8589 op1 = XEXP (x, 1);
8591 return (REG_P (op0) && REG_P (op1)
8592 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8593 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8594 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8595 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8598 bool
8599 avoiding_indexed_address_p (machine_mode mode)
8601 /* Avoid indexed addressing for modes that have non-indexed
8602 load/store instruction forms. */
8603 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8606 bool
8607 legitimate_indirect_address_p (rtx x, int strict)
8609 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8612 bool
8613 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8615 if (!TARGET_MACHO || !flag_pic
8616 || mode != SImode || GET_CODE (x) != MEM)
8617 return false;
8618 x = XEXP (x, 0);
8620 if (GET_CODE (x) != LO_SUM)
8621 return false;
8622 if (GET_CODE (XEXP (x, 0)) != REG)
8623 return false;
8624 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8625 return false;
8626 x = XEXP (x, 1);
8628 return CONSTANT_P (x);
8631 static bool
8632 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8634 if (GET_CODE (x) != LO_SUM)
8635 return false;
8636 if (GET_CODE (XEXP (x, 0)) != REG)
8637 return false;
8638 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8639 return false;
8640 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8641 if (mode_supports_vsx_dform_quad (mode))
8642 return false;
8643 x = XEXP (x, 1);
8645 if (TARGET_ELF || TARGET_MACHO)
8647 bool large_toc_ok;
8649 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8650 return false;
8651 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8652 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8653 recognizes some LO_SUM addresses as valid although this
8654 function says the opposite.  In most cases, LRA can generate
8655 correct code for address reloads through various transformations;
8656 only some LO_SUM cases are beyond it.  So we need to add code
8657 analogous to that in rs6000_legitimize_reload_address for
8658 LO_SUM here, saying that some addresses are still valid. */
8659 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8660 && small_toc_ref (x, VOIDmode));
8661 if (TARGET_TOC && ! large_toc_ok)
8662 return false;
8663 if (GET_MODE_NUNITS (mode) != 1)
8664 return false;
8665 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8666 && !(/* ??? Assume floating point reg based on mode? */
8667 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8668 && (mode == DFmode || mode == DDmode)))
8669 return false;
8671 return CONSTANT_P (x) || large_toc_ok;
8674 return false;
8678 /* Try machine-dependent ways of modifying an illegitimate address
8679 to be legitimate. If we find one, return the new, valid address.
8680 This is used from only one place: `memory_address' in explow.c.
8682 OLDX is the address as it was before break_out_memory_refs was
8683 called. In some cases it is useful to look at this to decide what
8684 needs to be done.
8686 It is always safe for this function to do nothing. It exists to
8687 recognize opportunities to optimize the output.
8689 On RS/6000, first check for the sum of a register with a constant
8690 integer that is out of range. If so, generate code to add the
8691 constant with the low-order 16 bits masked to the register and force
8692 this result into another register (this can be done with `cau').
8693 Then generate an address of REG+(CONST&0xffff), allowing for the
8694 possibility of bit 16 being a one.
8696 Then check for the sum of a register and something not constant, try to
8697 load the other things into a register and return the sum. */
8699 static rtx
8700 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8701 machine_mode mode)
8703 unsigned int extra;
8705 if (!reg_offset_addressing_ok_p (mode)
8706 || mode_supports_vsx_dform_quad (mode))
8708 if (virtual_stack_registers_memory_p (x))
8709 return x;
8711 /* In theory we should not be seeing addresses of the form reg+0,
8712 but just in case it is generated, optimize it away. */
8713 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8714 return force_reg (Pmode, XEXP (x, 0));
8716 /* For TImode with load/store quad, restrict addresses to just a single
8717 pointer, so it works with both GPRs and VSX registers. */
8718 /* Make sure both operands are registers. */
8719 else if (GET_CODE (x) == PLUS
8720 && (mode != TImode || !TARGET_VSX))
8721 return gen_rtx_PLUS (Pmode,
8722 force_reg (Pmode, XEXP (x, 0)),
8723 force_reg (Pmode, XEXP (x, 1)));
8724 else
8725 return force_reg (Pmode, x);
8727 if (GET_CODE (x) == SYMBOL_REF)
8729 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8730 if (model != 0)
8731 return rs6000_legitimize_tls_address (x, model);
8734 extra = 0;
8735 switch (mode)
8737 case E_TFmode:
8738 case E_TDmode:
8739 case E_TImode:
8740 case E_PTImode:
8741 case E_IFmode:
8742 case E_KFmode:
8743 /* As in legitimate_offset_address_p we do not assume
8744 worst-case. The mode here is just a hint as to the registers
8745 used. A TImode is usually in gprs, but may actually be in
8746 fprs. Leave worst-case scenario for reload to handle via
8747 insn constraints. PTImode is restricted to GPRs. */
8748 extra = 8;
8749 break;
8750 default:
8751 break;
8754 if (GET_CODE (x) == PLUS
8755 && GET_CODE (XEXP (x, 0)) == REG
8756 && GET_CODE (XEXP (x, 1)) == CONST_INT
8757 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8758 >= 0x10000 - extra)
8759 && !PAIRED_VECTOR_MODE (mode))
8761 HOST_WIDE_INT high_int, low_int;
8762 rtx sum;
8763 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8764 if (low_int >= 0x8000 - extra)
8765 low_int = 0;
8766 high_int = INTVAL (XEXP (x, 1)) - low_int;
8767 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8768 GEN_INT (high_int)), 0);
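/* Worked example: reg + 0x12345 splits into high_int == 0x10000 and
   low_int == 0x2345; one addis forms reg + 0x10000, and the returned
   address (sum + 0x2345) is a valid D-form offset.  */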
8769 return plus_constant (Pmode, sum, low_int);
8771 else if (GET_CODE (x) == PLUS
8772 && GET_CODE (XEXP (x, 0)) == REG
8773 && GET_CODE (XEXP (x, 1)) != CONST_INT
8774 && GET_MODE_NUNITS (mode) == 1
8775 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8776 || (/* ??? Assume floating point reg based on mode? */
8777 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8778 && (mode == DFmode || mode == DDmode)))
8779 && !avoiding_indexed_address_p (mode))
8781 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8782 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8784 else if (PAIRED_VECTOR_MODE (mode))
8786 if (mode == DImode)
8787 return x;
8788 /* We accept [reg + reg]. */
8790 if (GET_CODE (x) == PLUS)
8792 rtx op1 = XEXP (x, 0);
8793 rtx op2 = XEXP (x, 1);
8794 rtx y;
8796 op1 = force_reg (Pmode, op1);
8797 op2 = force_reg (Pmode, op2);
8799 /* We can't always do [reg + reg] for these, because [reg +
8800 reg + offset] is not a legitimate addressing mode. */
8801 y = gen_rtx_PLUS (Pmode, op1, op2);
8803 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8804 return force_reg (Pmode, y);
8805 else
8806 return y;
8809 return force_reg (Pmode, x);
8811 else if ((TARGET_ELF
8812 #if TARGET_MACHO
8813 || !MACHO_DYNAMIC_NO_PIC_P
8814 #endif
8816 && TARGET_32BIT
8817 && TARGET_NO_TOC
8818 && ! flag_pic
8819 && GET_CODE (x) != CONST_INT
8820 && GET_CODE (x) != CONST_WIDE_INT
8821 && GET_CODE (x) != CONST_DOUBLE
8822 && CONSTANT_P (x)
8823 && GET_MODE_NUNITS (mode) == 1
8824 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8825 || (/* ??? Assume floating point reg based on mode? */
8826 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8827 && (mode == DFmode || mode == DDmode))))
8829 rtx reg = gen_reg_rtx (Pmode);
8830 if (TARGET_ELF)
8831 emit_insn (gen_elf_high (reg, x));
8832 else
8833 emit_insn (gen_macho_high (reg, x));
8834 return gen_rtx_LO_SUM (Pmode, reg, x);
8836 else if (TARGET_TOC
8837 && GET_CODE (x) == SYMBOL_REF
8838 && constant_pool_expr_p (x)
8839 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8840 return create_TOC_reference (x, NULL_RTX);
8841 else
8842 return x;
8845 /* Debug version of rs6000_legitimize_address. */
8846 static rtx
8847 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8849 rtx ret;
8850 rtx_insn *insns;
8852 start_sequence ();
8853 ret = rs6000_legitimize_address (x, oldx, mode);
8854 insns = get_insns ();
8855 end_sequence ();
8857 if (ret != x)
8859 fprintf (stderr,
8860 "\nrs6000_legitimize_address: mode %s, old code %s, "
8861 "new code %s, modified\n",
8862 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8863 GET_RTX_NAME (GET_CODE (ret)));
8865 fprintf (stderr, "Original address:\n");
8866 debug_rtx (x);
8868 fprintf (stderr, "oldx:\n");
8869 debug_rtx (oldx);
8871 fprintf (stderr, "New address:\n");
8872 debug_rtx (ret);
8874 if (insns)
8876 fprintf (stderr, "Insns added:\n");
8877 debug_rtx_list (insns, 20);
8880 else
8882 fprintf (stderr,
8883 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8884 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8886 debug_rtx (x);
8889 if (insns)
8890 emit_insn (insns);
8892 return ret;
8895 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8896 We need to emit DTP-relative relocations. */
8898 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8899 static void
8900 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8902 switch (size)
8904 case 4:
8905 fputs ("\t.long\t", file);
8906 break;
8907 case 8:
8908 fputs (DOUBLE_INT_ASM_OP, file);
8909 break;
8910 default:
8911 gcc_unreachable ();
8913 output_addr_const (file, x);
8914 if (TARGET_ELF)
8915 fputs ("@dtprel+0x8000", file);
8916 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8918 switch (SYMBOL_REF_TLS_MODEL (x))
8920 case 0:
8921 break;
8922 case TLS_MODEL_LOCAL_EXEC:
8923 fputs ("@le", file);
8924 break;
8925 case TLS_MODEL_INITIAL_EXEC:
8926 fputs ("@ie", file);
8927 break;
8928 case TLS_MODEL_GLOBAL_DYNAMIC:
8929 case TLS_MODEL_LOCAL_DYNAMIC:
8930 fputs ("@m", file);
8931 break;
8932 default:
8933 gcc_unreachable ();
8938 /* Return true if X is a symbol that refers to real (rather than emulated)
8939 TLS. */
8941 static bool
8942 rs6000_real_tls_symbol_ref_p (rtx x)
8944 return (GET_CODE (x) == SYMBOL_REF
8945 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8948 /* In the name of slightly smaller debug output, and to cater to
8949 general assembler lossage, recognize various UNSPEC sequences
8950 and turn them back into a direct symbol reference. */
8952 static rtx
8953 rs6000_delegitimize_address (rtx orig_x)
8955 rtx x, y, offset;
8957 orig_x = delegitimize_mem_from_attrs (orig_x);
8958 x = orig_x;
8959 if (MEM_P (x))
8960 x = XEXP (x, 0);
8962 y = x;
8963 if (TARGET_CMODEL != CMODEL_SMALL
8964 && GET_CODE (y) == LO_SUM)
8965 y = XEXP (y, 1);
8967 offset = NULL_RTX;
8968 if (GET_CODE (y) == PLUS
8969 && GET_MODE (y) == Pmode
8970 && CONST_INT_P (XEXP (y, 1)))
8972 offset = XEXP (y, 1);
8973 y = XEXP (y, 0);
8976 if (GET_CODE (y) == UNSPEC
8977 && XINT (y, 1) == UNSPEC_TOCREL)
8979 y = XVECEXP (y, 0, 0);
8981 #ifdef HAVE_AS_TLS
8982 /* Do not associate thread-local symbols with the original
8983 constant pool symbol. */
8984 if (TARGET_XCOFF
8985 && GET_CODE (y) == SYMBOL_REF
8986 && CONSTANT_POOL_ADDRESS_P (y)
8987 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8988 return orig_x;
8989 #endif
8991 if (offset != NULL_RTX)
8992 y = gen_rtx_PLUS (Pmode, y, offset);
8993 if (!MEM_P (orig_x))
8994 return y;
8995 else
8996 return replace_equiv_address_nv (orig_x, y);
8999 if (TARGET_MACHO
9000 && GET_CODE (orig_x) == LO_SUM
9001 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9003 y = XEXP (XEXP (orig_x, 1), 0);
9004 if (GET_CODE (y) == UNSPEC
9005 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9006 return XVECEXP (y, 0, 0);
9009 return orig_x;
9012 /* Return true if X shouldn't be emitted into the debug info.
9013 The linker doesn't like .toc section references from
9014 .debug_* sections, so reject .toc section symbols. */
9016 static bool
9017 rs6000_const_not_ok_for_debug_p (rtx x)
9019 if (GET_CODE (x) == UNSPEC)
9020 return true;
9021 if (GET_CODE (x) == SYMBOL_REF
9022 && CONSTANT_POOL_ADDRESS_P (x))
9024 rtx c = get_pool_constant (x);
9025 machine_mode cmode = get_pool_mode (x);
9026 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9027 return true;
9030 return false;
9034 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9036 static bool
9037 rs6000_legitimate_combined_insn (rtx_insn *insn)
9039 int icode = INSN_CODE (insn);
9041 /* Reject creating doloop insns. Combine should not be allowed
9042 to create these for a number of reasons:
9043 1) In a nested loop, if combine creates one of these in an
9044 outer loop and the register allocator happens to allocate ctr
9045 to the outer loop insn, then the inner loop can't use ctr.
9046 Inner loops ought to be more highly optimized.
9047 2) Combine often wants to create one of these from what was
9048 originally a three insn sequence, first combining the three
9049 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9050 allocated ctr, the splitter takes us back to the three insn
9051 sequence. It's better to stop combine at the two insn
9052 sequence.
9053 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9054 insns, the register allocator sometimes uses floating point
9055 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9056 jump insn and output reloads are not implemented for jumps,
9057 the ctrsi/ctrdi splitters need to handle all possible cases.
9058 That's a pain, and it gets to be seriously difficult when a
9059 splitter that runs after reload needs memory to transfer from
9060 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9061 for the difficult case. It's better to not create problems
9062 in the first place. */
9063 if (icode != CODE_FOR_nothing
9064 && (icode == CODE_FOR_bdz_si
9065 || icode == CODE_FOR_bdz_di
9066 || icode == CODE_FOR_bdnz_si
9067 || icode == CODE_FOR_bdnz_di
9068 || icode == CODE_FOR_bdztf_si
9069 || icode == CODE_FOR_bdztf_di
9070 || icode == CODE_FOR_bdnztf_si
9071 || icode == CODE_FOR_bdnztf_di))
9072 return false;
9074 return true;
9077 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9079 static GTY(()) rtx rs6000_tls_symbol;
9080 static rtx
9081 rs6000_tls_get_addr (void)
9083 if (!rs6000_tls_symbol)
9084 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9086 return rs6000_tls_symbol;
9089 /* Construct the SYMBOL_REF for TLS GOT references. */
9091 static GTY(()) rtx rs6000_got_symbol;
9092 static rtx
9093 rs6000_got_sym (void)
9095 if (!rs6000_got_symbol)
9097 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9098 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9099 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9102 return rs6000_got_symbol;
9105 /* AIX Thread-Local Address support. */
9107 static rtx
9108 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9110 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9111 const char *name;
9112 char *tlsname;
9114 name = XSTR (addr, 0);
9115 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9116 or the symbol will be in the TLS private data section. */
9117 if (name[strlen (name) - 1] != ']'
9118 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9119 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9121 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9122 strcpy (tlsname, name);
9123 strcat (tlsname,
9124 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9125 tlsaddr = copy_rtx (addr);
9126 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9128 else
9129 tlsaddr = addr;
9131 /* Place addr into TOC constant pool. */
9132 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9134 /* Output the TOC entry and create the MEM referencing the value. */
9135 if (constant_pool_expr_p (XEXP (sym, 0))
9136 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9138 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9139 mem = gen_const_mem (Pmode, tocref);
9140 set_mem_alias_set (mem, get_TOC_alias_set ());
9142 else
9143 return sym;
9145 /* Use global-dynamic for local-dynamic. */
9146 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9147 || model == TLS_MODEL_LOCAL_DYNAMIC)
9149 /* Create new TOC reference for @m symbol. */
9150 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9151 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9152 strcpy (tlsname, "*LCM");
9153 strcat (tlsname, name + 3);
9154 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9155 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9156 tocref = create_TOC_reference (modaddr, NULL_RTX);
9157 rtx modmem = gen_const_mem (Pmode, tocref);
9158 set_mem_alias_set (modmem, get_TOC_alias_set ());
9160 rtx modreg = gen_reg_rtx (Pmode);
9161 emit_insn (gen_rtx_SET (modreg, modmem));
9163 tmpreg = gen_reg_rtx (Pmode);
9164 emit_insn (gen_rtx_SET (tmpreg, mem));
9166 dest = gen_reg_rtx (Pmode);
9167 if (TARGET_32BIT)
9168 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9169 else
9170 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9171 return dest;
9173 /* Obtain TLS pointer: 32-bit call or 64-bit GPR 13. */
9174 else if (TARGET_32BIT)
9176 tlsreg = gen_reg_rtx (SImode);
9177 emit_insn (gen_tls_get_tpointer (tlsreg));
9179 else
9180 tlsreg = gen_rtx_REG (DImode, 13);
9182 /* Load the TOC value into a temporary register. */
9183 tmpreg = gen_reg_rtx (Pmode);
9184 emit_insn (gen_rtx_SET (tmpreg, mem));
9185 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9186 gen_rtx_MINUS (Pmode, addr, tlsreg));
9188 /* Add TOC symbol value to TLS pointer. */
9189 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9191 return dest;
9194 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9195 this (thread-local) address. */
9197 static rtx
9198 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9200 rtx dest, insn;
9202 if (TARGET_XCOFF)
9203 return rs6000_legitimize_tls_address_aix (addr, model);
9205 dest = gen_reg_rtx (Pmode);
9206 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9208 rtx tlsreg;
9210 if (TARGET_64BIT)
9212 tlsreg = gen_rtx_REG (Pmode, 13);
9213 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9215 else
9217 tlsreg = gen_rtx_REG (Pmode, 2);
9218 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9220 emit_insn (insn);
9222 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9224 rtx tlsreg, tmp;
9226 tmp = gen_reg_rtx (Pmode);
9227 if (TARGET_64BIT)
9229 tlsreg = gen_rtx_REG (Pmode, 13);
9230 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9232 else
9234 tlsreg = gen_rtx_REG (Pmode, 2);
9235 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9237 emit_insn (insn);
9238 if (TARGET_64BIT)
9239 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9240 else
9241 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9242 emit_insn (insn);
9244 else
9246 rtx r3, got, tga, tmp1, tmp2, call_insn;
9248 /* We currently use relocations like @got@tlsgd for tls, which
9249 means the linker will handle allocation of tls entries, placing
9250 them in the .got section. So use a pointer to the .got section,
9251 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9252 or to secondary GOT sections used by 32-bit -fPIC. */
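/* Illustratively, a 64-bit ELF global-dynamic access of a variable "x"
ends up as: addis r3,r2,x@got@tlsgd@ha; addi r3,r3,x@got@tlsgd@l;
bl __tls_get_addr(x@tlsgd); nop. The exact sequence depends on the
ABI and code model; this is a sketch, not the emitted template. */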
9253 if (TARGET_64BIT)
9254 got = gen_rtx_REG (Pmode, 2);
9255 else
9257 if (flag_pic == 1)
9258 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9259 else
9261 rtx gsym = rs6000_got_sym ();
9262 got = gen_reg_rtx (Pmode);
9263 if (flag_pic == 0)
9264 rs6000_emit_move (got, gsym, Pmode);
9265 else
9267 rtx mem, lab;
9269 tmp1 = gen_reg_rtx (Pmode);
9270 tmp2 = gen_reg_rtx (Pmode);
9271 mem = gen_const_mem (Pmode, tmp1);
9272 lab = gen_label_rtx ();
9273 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9274 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9275 if (TARGET_LINK_STACK)
9276 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9277 emit_move_insn (tmp2, mem);
9278 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9279 set_unique_reg_note (last, REG_EQUAL, gsym);
9284 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9286 tga = rs6000_tls_get_addr ();
9287 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9288 const0_rtx, Pmode);
9290 r3 = gen_rtx_REG (Pmode, 3);
9291 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9293 if (TARGET_64BIT)
9294 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9295 else
9296 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9298 else if (DEFAULT_ABI == ABI_V4)
9299 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9300 else
9301 gcc_unreachable ();
9302 call_insn = last_call_insn ();
9303 PATTERN (call_insn) = insn;
9304 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9305 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9306 pic_offset_table_rtx);
9308 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9310 tga = rs6000_tls_get_addr ();
9311 tmp1 = gen_reg_rtx (Pmode);
9312 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9313 const0_rtx, Pmode);
9315 r3 = gen_rtx_REG (Pmode, 3);
9316 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9318 if (TARGET_64BIT)
9319 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9320 else
9321 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9323 else if (DEFAULT_ABI == ABI_V4)
9324 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9325 else
9326 gcc_unreachable ();
9327 call_insn = last_call_insn ();
9328 PATTERN (call_insn) = insn;
9329 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9330 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9331 pic_offset_table_rtx);
9333 if (rs6000_tls_size == 16)
9335 if (TARGET_64BIT)
9336 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9337 else
9338 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9340 else if (rs6000_tls_size == 32)
9342 tmp2 = gen_reg_rtx (Pmode);
9343 if (TARGET_64BIT)
9344 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9345 else
9346 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9347 emit_insn (insn);
9348 if (TARGET_64BIT)
9349 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9350 else
9351 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9353 else
9355 tmp2 = gen_reg_rtx (Pmode);
9356 if (TARGET_64BIT)
9357 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9358 else
9359 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9360 emit_insn (insn);
9361 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9363 emit_insn (insn);
9365 else
9367 /* IE, or 64-bit offset LE. */
9368 tmp2 = gen_reg_rtx (Pmode);
9369 if (TARGET_64BIT)
9370 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9371 else
9372 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9373 emit_insn (insn);
9374 if (TARGET_64BIT)
9375 insn = gen_tls_tls_64 (dest, tmp2, addr);
9376 else
9377 insn = gen_tls_tls_32 (dest, tmp2, addr);
9378 emit_insn (insn);
9382 return dest;
9385 /* Only create the global variable for the stack protect guard if we are using
9386 the global flavor of that guard. */
9387 static tree
9388 rs6000_init_stack_protect_guard (void)
9390 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9391 return default_stack_protect_guard ();
9393 return NULL_TREE;
9396 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9398 static bool
9399 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9401 if (GET_CODE (x) == HIGH
9402 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9403 return true;
9405 /* A TLS symbol in the TOC cannot contain a sum. */
9406 if (GET_CODE (x) == CONST
9407 && GET_CODE (XEXP (x, 0)) == PLUS
9408 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9409 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9410 return true;
9412 /* Do not place an ELF TLS symbol in the constant pool. */
9413 return TARGET_ELF && tls_referenced_p (x);
9416 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9417 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9418 can be addressed relative to the toc pointer. */
9420 static bool
9421 use_toc_relative_ref (rtx sym, machine_mode mode)
9423 return ((constant_pool_expr_p (sym)
9424 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9425 get_pool_mode (sym)))
9426 || (TARGET_CMODEL == CMODEL_MEDIUM
9427 && SYMBOL_REF_LOCAL_P (sym)
9428 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9431 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9432 replace the input X, or the original X if no replacement is called for.
9433 The output parameter *WIN is 1 if the calling macro should goto WIN,
9434 0 if it should not.
9436 For RS/6000, we wish to handle large displacements off a base
9437 register by splitting the addend across an addis and the mem insn.
9438 This cuts the number of extra insns needed from 3 to 1.
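For example (illustrative values), reload of an address
(plus (reg r9) (const_int 0x12345678)) produces
(plus (plus (reg r9) (const_int 0x12340000)) (const_int 0x5678)),
where the inner PLUS is loaded into a base register (an addis) and
the low 16 bits stay in the mem as a signed displacement.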
9440 On Darwin, we use this to generate code for floating point constants.
9441 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9442 The Darwin code is inside #if TARGET_MACHO because only then are the
9443 machopic_* functions defined. */
9444 static rtx
9445 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9446 int opnum, int type,
9447 int ind_levels ATTRIBUTE_UNUSED, int *win)
9449 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9450 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9452 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9453 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9454 if (reg_offset_p
9455 && opnum == 1
9456 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9457 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9458 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9459 && TARGET_P9_VECTOR)
9460 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9461 && TARGET_P9_VECTOR)))
9462 reg_offset_p = false;
9464 /* We must recognize output that we have already generated ourselves. */
9465 if (GET_CODE (x) == PLUS
9466 && GET_CODE (XEXP (x, 0)) == PLUS
9467 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9468 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9469 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9471 if (TARGET_DEBUG_ADDR)
9473 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9474 debug_rtx (x);
9476 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9477 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9478 opnum, (enum reload_type) type);
9479 *win = 1;
9480 return x;
9483 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9484 if (GET_CODE (x) == LO_SUM
9485 && GET_CODE (XEXP (x, 0)) == HIGH)
9487 if (TARGET_DEBUG_ADDR)
9489 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9490 debug_rtx (x);
9492 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9493 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9494 opnum, (enum reload_type) type);
9495 *win = 1;
9496 return x;
9499 #if TARGET_MACHO
9500 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9501 && GET_CODE (x) == LO_SUM
9502 && GET_CODE (XEXP (x, 0)) == PLUS
9503 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9504 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9505 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9506 && machopic_operand_p (XEXP (x, 1)))
9508 /* Result of previous invocation of this function on Darwin
9509 floating point constant. */
9510 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9511 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9512 opnum, (enum reload_type) type);
9513 *win = 1;
9514 return x;
9516 #endif
9518 if (TARGET_CMODEL != CMODEL_SMALL
9519 && reg_offset_p
9520 && !quad_offset_p
9521 && small_toc_ref (x, VOIDmode))
9523 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9524 x = gen_rtx_LO_SUM (Pmode, hi, x);
9525 if (TARGET_DEBUG_ADDR)
9527 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9528 debug_rtx (x);
9530 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9531 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9532 opnum, (enum reload_type) type);
9533 *win = 1;
9534 return x;
9537 if (GET_CODE (x) == PLUS
9538 && REG_P (XEXP (x, 0))
9539 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9540 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9541 && CONST_INT_P (XEXP (x, 1))
9542 && reg_offset_p
9543 && !PAIRED_VECTOR_MODE (mode)
9544 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9546 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9547 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9548 HOST_WIDE_INT high
9549 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
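/* The XOR/subtract computation sign-extends the low 16 bits: for an
illustrative val of 0x18000 it yields low = -0x8000 and high = 0x20000,
so high + low == val and low fits a signed 16-bit displacement. */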
9551 /* Check for 32-bit overflow or quad addresses with one of the
9552 four least significant bits set. */
9553 if (high + low != val
9554 || (quad_offset_p && (low & 0xf)))
9556 *win = 0;
9557 return x;
9560 /* Reload the high part into a base reg; leave the low part
9561 in the mem directly. */
9563 x = gen_rtx_PLUS (GET_MODE (x),
9564 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9565 GEN_INT (high)),
9566 GEN_INT (low));
9568 if (TARGET_DEBUG_ADDR)
9570 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9571 debug_rtx (x);
9573 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9574 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9575 opnum, (enum reload_type) type);
9576 *win = 1;
9577 return x;
9580 if (GET_CODE (x) == SYMBOL_REF
9581 && reg_offset_p
9582 && !quad_offset_p
9583 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9584 && !PAIRED_VECTOR_MODE (mode)
9585 #if TARGET_MACHO
9586 && DEFAULT_ABI == ABI_DARWIN
9587 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9588 && machopic_symbol_defined_p (x)
9589 #else
9590 && DEFAULT_ABI == ABI_V4
9591 && !flag_pic
9592 #endif
9593 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9594 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9595 without fprs.
9596 ??? Assume floating point reg based on mode? This assumption is
9597 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9598 where reload ends up doing a DFmode load of a constant from
9599 mem using two gprs. Unfortunately, at this point reload
9600 hasn't yet selected regs so poking around in reload data
9601 won't help and even if we could figure out the regs reliably,
9602 we'd still want to allow this transformation when the mem is
9603 naturally aligned. Since we say the address is good here, we
9604 can't disable offsets from LO_SUMs in mem_operand_gpr.
9605 FIXME: Allow offset from lo_sum for other modes too, when
9606 mem is sufficiently aligned.
9608 Also disallow this if the type can go in VMX/Altivec registers, since
9609 those registers do not have d-form (reg+offset) address modes. */
9610 && !reg_addr[mode].scalar_in_vmx_p
9611 && mode != TFmode
9612 && mode != TDmode
9613 && mode != IFmode
9614 && mode != KFmode
9615 && (mode != TImode || !TARGET_VSX)
9616 && mode != PTImode
9617 && (mode != DImode || TARGET_POWERPC64)
9618 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9619 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9621 #if TARGET_MACHO
9622 if (flag_pic)
9624 rtx offset = machopic_gen_offset (x);
9625 x = gen_rtx_LO_SUM (GET_MODE (x),
9626 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9627 gen_rtx_HIGH (Pmode, offset)), offset);
9629 else
9630 #endif
9631 x = gen_rtx_LO_SUM (GET_MODE (x),
9632 gen_rtx_HIGH (Pmode, x), x);
9634 if (TARGET_DEBUG_ADDR)
9636 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9637 debug_rtx (x);
9639 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9640 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9641 opnum, (enum reload_type) type);
9642 *win = 1;
9643 return x;
9646 /* Reload an offset address wrapped by an AND that represents the
9647 masking of the lower bits. Strip the outer AND and let reload
9648 convert the offset address into an indirect address. For VSX,
9649 force reload to create the address with an AND in a separate
9650 register, because we can't guarantee an altivec register will
9651 be used. */
9652 if (VECTOR_MEM_ALTIVEC_P (mode)
9653 && GET_CODE (x) == AND
9654 && GET_CODE (XEXP (x, 0)) == PLUS
9655 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9656 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9657 && GET_CODE (XEXP (x, 1)) == CONST_INT
9658 && INTVAL (XEXP (x, 1)) == -16)
9660 x = XEXP (x, 0);
9661 *win = 1;
9662 return x;
9665 if (TARGET_TOC
9666 && reg_offset_p
9667 && !quad_offset_p
9668 && GET_CODE (x) == SYMBOL_REF
9669 && use_toc_relative_ref (x, mode))
9671 x = create_TOC_reference (x, NULL_RTX);
9672 if (TARGET_CMODEL != CMODEL_SMALL)
9674 if (TARGET_DEBUG_ADDR)
9676 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9677 debug_rtx (x);
9679 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9680 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9681 opnum, (enum reload_type) type);
9683 *win = 1;
9684 return x;
9686 *win = 0;
9687 return x;
9690 /* Debug version of rs6000_legitimize_reload_address. */
9691 static rtx
9692 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9693 int opnum, int type,
9694 int ind_levels, int *win)
9696 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9697 ind_levels, win);
9698 fprintf (stderr,
9699 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9700 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9701 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9702 debug_rtx (x);
9704 if (x == ret)
9705 fprintf (stderr, "Same address returned\n");
9706 else if (!ret)
9707 fprintf (stderr, "NULL returned\n");
9708 else
9710 fprintf (stderr, "New address:\n");
9711 debug_rtx (ret);
9714 return ret;
9717 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9718 that is a valid memory address for an instruction.
9719 The MODE argument is the machine mode for the MEM expression
9720 that wants to use this address.
9722 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9723 refers to a constant pool entry of an address (or the sum of it
9724 plus a constant), a short (16-bit signed) constant plus a register,
9725 the sum of two registers, or a register indirect, possibly with an
9726 auto-increment. For DFmode, DDmode and DImode with a constant plus
9727 register, we must ensure that both words are addressable, or on
9728 PowerPC64 that the offset is word-aligned.
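Illustrative RTL for these forms: (mem (symbol_ref)) addressed via
the TOC, (mem (plus (reg) (const_int 16))), (mem (plus (reg) (reg))),
and (mem (reg)) or (mem (pre_inc (reg))).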
9730 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9731 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9732 because adjacent memory cells are accessed by adding word-sized offsets
9733 during assembly output. */
9734 static bool
9735 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9737 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9738 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9740 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9741 if (VECTOR_MEM_ALTIVEC_P (mode)
9742 && GET_CODE (x) == AND
9743 && GET_CODE (XEXP (x, 1)) == CONST_INT
9744 && INTVAL (XEXP (x, 1)) == -16)
9745 x = XEXP (x, 0);
9747 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9748 return 0;
9749 if (legitimate_indirect_address_p (x, reg_ok_strict))
9750 return 1;
9751 if (TARGET_UPDATE
9752 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9753 && mode_supports_pre_incdec_p (mode)
9754 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9755 return 1;
9756 /* Handle restricted vector d-form offsets in ISA 3.0. */
9757 if (quad_offset_p)
9759 if (quad_address_p (x, mode, reg_ok_strict))
9760 return 1;
9762 else if (virtual_stack_registers_memory_p (x))
9763 return 1;
9765 else if (reg_offset_p)
9767 if (legitimate_small_data_p (mode, x))
9768 return 1;
9769 if (legitimate_constant_pool_address_p (x, mode,
9770 reg_ok_strict || lra_in_progress))
9771 return 1;
9772 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9773 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9774 return 1;
9777 /* For TImode, if we have TImode in VSX registers, only allow register
9778 indirect addresses. This will allow the values to go in either GPRs
9779 or VSX registers without reloading. The vector types would tend to
9780 go into VSX registers, so we allow REG+REG, while TImode seems
9781 somewhat split, in that some uses are GPR based, and some VSX based. */
9782 /* FIXME: We could loosen this by changing the following to
9783 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9784 but currently we cannot allow REG+REG addressing for TImode. See
9785 PR72827 for complete details on how this ends up hoodwinking DSE. */
9786 if (mode == TImode && TARGET_VSX)
9787 return 0;
9788 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9789 if (! reg_ok_strict
9790 && reg_offset_p
9791 && GET_CODE (x) == PLUS
9792 && GET_CODE (XEXP (x, 0)) == REG
9793 && (XEXP (x, 0) == virtual_stack_vars_rtx
9794 || XEXP (x, 0) == arg_pointer_rtx)
9795 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9796 return 1;
9797 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9798 return 1;
9799 if (!FLOAT128_2REG_P (mode)
9800 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9801 || TARGET_POWERPC64
9802 || (mode != DFmode && mode != DDmode))
9803 && (TARGET_POWERPC64 || mode != DImode)
9804 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9805 && mode != PTImode
9806 && !avoiding_indexed_address_p (mode)
9807 && legitimate_indexed_address_p (x, reg_ok_strict))
9808 return 1;
9809 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9810 && mode_supports_pre_modify_p (mode)
9811 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9812 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9813 reg_ok_strict, false)
9814 || (!avoiding_indexed_address_p (mode)
9815 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9816 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9817 return 1;
9818 if (reg_offset_p && !quad_offset_p
9819 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9820 return 1;
9821 return 0;
9824 /* Debug version of rs6000_legitimate_address_p. */
9825 static bool
9826 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9827 bool reg_ok_strict)
9829 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9830 fprintf (stderr,
9831 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9832 "strict = %d, reload = %s, code = %s\n",
9833 ret ? "true" : "false",
9834 GET_MODE_NAME (mode),
9835 reg_ok_strict,
9836 (reload_completed ? "after" : "before"),
9837 GET_RTX_NAME (GET_CODE (x)));
9838 debug_rtx (x);
9840 return ret;
9843 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9845 static bool
9846 rs6000_mode_dependent_address_p (const_rtx addr,
9847 addr_space_t as ATTRIBUTE_UNUSED)
9849 return rs6000_mode_dependent_address_ptr (addr);
9852 /* Go to LABEL if ADDR (a legitimate address expression)
9853 has an effect that depends on the machine mode it is used for.
9855 On the RS/6000 this is true of all integral offsets (since AltiVec
9856 and VSX modes don't allow them), and of pre-increment and pre-decrement.
9858 ??? Except that due to conceptual problems in offsettable_address_p
9859 we can't really report the problems of integral offsets. So leave
9860 this assuming that the adjustable offset must be valid for the
9861 sub-words of a TFmode operand, which is what we had before. */
9863 static bool
9864 rs6000_mode_dependent_address (const_rtx addr)
9866 switch (GET_CODE (addr))
9868 case PLUS:
9869 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9870 is considered a legitimate address before reload, so there
9871 are no offset restrictions in that case. Note that this
9872 condition is safe in strict mode because any address involving
9873 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9874 been rejected as illegitimate. */
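/* E.g. with TARGET_POWERPC64 an offset of 32760 (0x7ff8) or more is
flagged as mode-dependent: the last word of a 16-byte operand would
fall outside the signed 16-bit displacement range. */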
9875 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9876 && XEXP (addr, 0) != arg_pointer_rtx
9877 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9879 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9880 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9882 break;
9884 case LO_SUM:
9885 /* Anything in the constant pool is sufficiently aligned that
9886 all bytes have the same high part address. */
9887 return !legitimate_constant_pool_address_p (addr, QImode, false);
9889 /* Auto-increment cases are now treated generically in recog.c. */
9890 case PRE_MODIFY:
9891 return TARGET_UPDATE;
9893 /* AND is only allowed in Altivec loads. */
9894 case AND:
9895 return true;
9897 default:
9898 break;
9901 return false;
9904 /* Debug version of rs6000_mode_dependent_address. */
9905 static bool
9906 rs6000_debug_mode_dependent_address (const_rtx addr)
9908 bool ret = rs6000_mode_dependent_address (addr);
9910 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9911 ret ? "true" : "false");
9912 debug_rtx (addr);
9914 return ret;
9917 /* Implement FIND_BASE_TERM. */
9920 rs6000_find_base_term (rtx op)
9922 rtx base;
9924 base = op;
9925 if (GET_CODE (base) == CONST)
9926 base = XEXP (base, 0);
9927 if (GET_CODE (base) == PLUS)
9928 base = XEXP (base, 0);
9929 if (GET_CODE (base) == UNSPEC)
9930 switch (XINT (base, 1))
9932 case UNSPEC_TOCREL:
9933 case UNSPEC_MACHOPIC_OFFSET:
9934 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9935 for aliasing purposes. */
9936 return XVECEXP (base, 0, 0);
9939 return op;
9942 /* More elaborate version of recog's offsettable_memref_p predicate
9943 that works around the ??? note of rs6000_mode_dependent_address.
9944 In particular it accepts
9946 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9948 in 32-bit mode, which the recog predicate rejects. */
9950 static bool
9951 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9953 bool worst_case;
9955 if (!MEM_P (op))
9956 return false;
9958 /* First mimic offsettable_memref_p. */
9959 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9960 return true;
9962 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9963 the latter predicate knows nothing about the mode of the memory
9964 reference and, therefore, assumes that it is the largest supported
9965 mode (TFmode). As a consequence, legitimate offsettable memory
9966 references are rejected. rs6000_legitimate_offset_address_p contains
9967 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9968 at least with a little bit of help here given that we know the
9969 actual registers used. */
9970 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9971 || GET_MODE_SIZE (reg_mode) == 4);
9972 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9973 true, worst_case);
9976 /* Determine the reassociation width to be used in reassociate_bb.
9977 This takes into account how many parallel operations we
9978 can actually do of a given type, and also the latency.
9980 int add/sub 6/cycle
9981 mul 2/cycle
9982 vect add/sub/mul 2/cycle
9983 fp add/sub/mul 2/cycle
9984 dfp 1/cycle. */
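/* E.g. on power9 an integer multiply chain is given width 4 while an
integer add/sub chain is given width 6, following the table above. */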
9987 static int
9988 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9989 machine_mode mode)
9991 switch (rs6000_tune)
9993 case PROCESSOR_POWER8:
9994 case PROCESSOR_POWER9:
9995 if (DECIMAL_FLOAT_MODE_P (mode))
9996 return 1;
9997 if (VECTOR_MODE_P (mode))
9998 return 4;
9999 if (INTEGRAL_MODE_P (mode))
10000 return opc == MULT_EXPR ? 4 : 6;
10001 if (FLOAT_MODE_P (mode))
10002 return 4;
10003 break;
10004 default:
10005 break;
10007 return 1;
10010 /* Change register usage conditional on target flags. */
10011 static void
10012 rs6000_conditional_register_usage (void)
10014 int i;
10016 if (TARGET_DEBUG_TARGET)
10017 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10019 /* Set MQ register fixed (already call_used) so that it will not be
10020 allocated. */
10021 fixed_regs[64] = 1;
10023 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10024 if (TARGET_64BIT)
10025 fixed_regs[13] = call_used_regs[13]
10026 = call_really_used_regs[13] = 1;
10028 /* Conditionally disable FPRs. */
10029 if (TARGET_SOFT_FLOAT)
10030 for (i = 32; i < 64; i++)
10031 fixed_regs[i] = call_used_regs[i]
10032 = call_really_used_regs[i] = 1;
10034 /* The TOC register is not killed across calls in a way that is
10035 visible to the compiler. */
10036 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10037 call_really_used_regs[2] = 0;
10039 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10040 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10042 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10043 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10044 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10045 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10047 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10048 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10049 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10050 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10052 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10053 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10054 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10056 if (!TARGET_ALTIVEC && !TARGET_VSX)
10058 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10059 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10060 call_really_used_regs[VRSAVE_REGNO] = 1;
10063 if (TARGET_ALTIVEC || TARGET_VSX)
10064 global_regs[VSCR_REGNO] = 1;
10066 if (TARGET_ALTIVEC_ABI)
10068 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10069 call_used_regs[i] = call_really_used_regs[i] = 1;
10071 /* AIX reserves VR20:31 in non-extended ABI mode. */
10072 if (TARGET_XCOFF)
10073 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10074 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10079 /* Output insns to set DEST equal to the constant SOURCE as a series of
10080 lis, ori and shl instructions and return TRUE. */
10082 bool
10083 rs6000_emit_set_const (rtx dest, rtx source)
10085 machine_mode mode = GET_MODE (dest);
10086 rtx temp, set;
10087 rtx_insn *insn;
10088 HOST_WIDE_INT c;
10090 gcc_checking_assert (CONST_INT_P (source));
10091 c = INTVAL (source);
10092 switch (mode)
10094 case E_QImode:
10095 case E_HImode:
10096 emit_insn (gen_rtx_SET (dest, source));
10097 return true;
10099 case E_SImode:
10100 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10102 emit_insn (gen_rtx_SET (copy_rtx (temp),
10103 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10104 emit_insn (gen_rtx_SET (dest,
10105 gen_rtx_IOR (SImode, copy_rtx (temp),
10106 GEN_INT (c & 0xffff))));
10107 break;
10109 case E_DImode:
10110 if (!TARGET_POWERPC64)
10112 rtx hi, lo;
10114 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10115 DImode);
10116 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10117 DImode);
10118 emit_move_insn (hi, GEN_INT (c >> 32));
10119 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10120 emit_move_insn (lo, GEN_INT (c));
10122 else
10123 rs6000_emit_set_long_const (dest, c);
10124 break;
10126 default:
10127 gcc_unreachable ();
10130 insn = get_last_insn ();
10131 set = single_set (insn);
10132 if (! CONSTANT_P (SET_SRC (set)))
10133 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10135 return true;
10138 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10139 Output insns to set DEST equal to the constant C as a series of
10140 lis, ori and shl instructions. */
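/* As an illustration, a value such as 0x123456789abcdef0 would be built
roughly as: lis rD,0x1234; ori rD,rD,0x5678; sldi rD,rD,32;
oris rD,rD,0x9abc; ori rD,rD,0xdef0 (register name illustrative). */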
10142 static void
10143 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10145 rtx temp;
10146 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10148 ud1 = c & 0xffff;
10149 c = c >> 16;
10150 ud2 = c & 0xffff;
10151 c = c >> 16;
10152 ud3 = c & 0xffff;
10153 c = c >> 16;
10154 ud4 = c & 0xffff;
10156 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10157 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10158 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10160 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10161 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10163 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10165 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10166 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10167 if (ud1 != 0)
10168 emit_move_insn (dest,
10169 gen_rtx_IOR (DImode, copy_rtx (temp),
10170 GEN_INT (ud1)));
10172 else if (ud3 == 0 && ud4 == 0)
10174 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10176 gcc_assert (ud2 & 0x8000);
10177 emit_move_insn (copy_rtx (temp),
10178 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10179 if (ud1 != 0)
10180 emit_move_insn (copy_rtx (temp),
10181 gen_rtx_IOR (DImode, copy_rtx (temp),
10182 GEN_INT (ud1)));
10183 emit_move_insn (dest,
10184 gen_rtx_ZERO_EXTEND (DImode,
10185 gen_lowpart (SImode,
10186 copy_rtx (temp))));
10188 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10189 || (ud4 == 0 && ! (ud3 & 0x8000)))
10191 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10193 emit_move_insn (copy_rtx (temp),
10194 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10195 if (ud2 != 0)
10196 emit_move_insn (copy_rtx (temp),
10197 gen_rtx_IOR (DImode, copy_rtx (temp),
10198 GEN_INT (ud2)));
10199 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10200 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10201 GEN_INT (16)));
10202 if (ud1 != 0)
10203 emit_move_insn (dest,
10204 gen_rtx_IOR (DImode, copy_rtx (temp),
10205 GEN_INT (ud1)));
10207 else
10209 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10211 emit_move_insn (copy_rtx (temp),
10212 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10213 if (ud3 != 0)
10214 emit_move_insn (copy_rtx (temp),
10215 gen_rtx_IOR (DImode, copy_rtx (temp),
10216 GEN_INT (ud3)));
10218 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10219 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10220 GEN_INT (32)));
10221 if (ud2 != 0)
10222 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10223 gen_rtx_IOR (DImode, copy_rtx (temp),
10224 GEN_INT (ud2 << 16)));
10225 if (ud1 != 0)
10226 emit_move_insn (dest,
10227 gen_rtx_IOR (DImode, copy_rtx (temp),
10228 GEN_INT (ud1)));
10232 /* Helper for the following. Get rid of [r+r] memory refs
10233 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10235 static void
10236 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10238 if (GET_CODE (operands[0]) == MEM
10239 && GET_CODE (XEXP (operands[0], 0)) != REG
10240 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10241 GET_MODE (operands[0]), false))
10242 operands[0]
10243 = replace_equiv_address (operands[0],
10244 copy_addr_to_reg (XEXP (operands[0], 0)));
10246 if (GET_CODE (operands[1]) == MEM
10247 && GET_CODE (XEXP (operands[1], 0)) != REG
10248 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10249 GET_MODE (operands[1]), false))
10250 operands[1]
10251 = replace_equiv_address (operands[1],
10252 copy_addr_to_reg (XEXP (operands[1], 0)));
10255 /* Generate a vector of constants to permute MODE for a little-endian
10256 storage operation by swapping the two halves of a vector. */
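/* For V4SImode, for instance, this yields the selector { 2, 3, 0, 1 },
i.e. the two doubleword halves of the vector are swapped. */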
10257 static rtvec
10258 rs6000_const_vec (machine_mode mode)
10260 int i, subparts;
10261 rtvec v;
10263 switch (mode)
10265 case E_V1TImode:
10266 subparts = 1;
10267 break;
10268 case E_V2DFmode:
10269 case E_V2DImode:
10270 subparts = 2;
10271 break;
10272 case E_V4SFmode:
10273 case E_V4SImode:
10274 subparts = 4;
10275 break;
10276 case E_V8HImode:
10277 subparts = 8;
10278 break;
10279 case E_V16QImode:
10280 subparts = 16;
10281 break;
10282 default:
10283 gcc_unreachable();
10286 v = rtvec_alloc (subparts);
10288 for (i = 0; i < subparts / 2; ++i)
10289 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10290 for (i = subparts / 2; i < subparts; ++i)
10291 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10293 return v;
10296 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10297 store operation. */
10298 void
10299 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10301 /* Scalar permutations are easier to express in integer modes than in
10302 floating-point modes, so cast them here. We use V1TImode instead
10303 of TImode to ensure that the values don't go through GPRs. */
10304 if (FLOAT128_VECTOR_P (mode))
10306 dest = gen_lowpart (V1TImode, dest);
10307 source = gen_lowpart (V1TImode, source);
10308 mode = V1TImode;
10311 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10312 scalar. */
10313 if (mode == TImode || mode == V1TImode)
10314 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10315 GEN_INT (64))));
10316 else
10318 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10319 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10323 /* Emit a little-endian load from vector memory location SOURCE to VSX
10324 register DEST in mode MODE. The load is done with two permuting
10325 insns that represent an lxvd2x and an xxpermdi. */
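/* Illustratively the pair behaves like lxvd2x vsxD,0,rA (a load with the
two doublewords swapped) followed by xxpermdi vsxD,vsxD,vsxD,2 (swapping
them back into element order); the operands shown are illustrative. */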
10326 void
10327 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10329 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10330 V1TImode). */
10331 if (mode == TImode || mode == V1TImode)
10333 mode = V2DImode;
10334 dest = gen_lowpart (V2DImode, dest);
10335 source = adjust_address (source, V2DImode, 0);
10338 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10339 rs6000_emit_le_vsx_permute (tmp, source, mode);
10340 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10343 /* Emit a little-endian store to vector memory location DEST from VSX
10344 register SOURCE in mode MODE. The store is done with two permuting
10345 insns that represent an xxpermdi and an stxvd2x. */
10346 void
10347 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10349 /* This should never be called during or after LRA, because it does
10350 not re-permute the source register. It is intended only for use
10351 during expand. */
10352 gcc_assert (!lra_in_progress && !reload_completed);
10354 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10355 V1TImode). */
10356 if (mode == TImode || mode == V1TImode)
10358 mode = V2DImode;
10359 dest = adjust_address (dest, V2DImode, 0);
10360 source = gen_lowpart (V2DImode, source);
10363 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10364 rs6000_emit_le_vsx_permute (tmp, source, mode);
10365 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10368 /* Emit a sequence representing a little-endian VSX load or store,
10369 moving data from SOURCE to DEST in mode MODE. This is done
10370 separately from rs6000_emit_move to ensure it is called only
10371 during expand. LE VSX loads and stores introduced later are
10372 handled with a split. The expand-time RTL generation allows
10373 us to optimize away redundant pairs of register-permutes. */
10374 void
10375 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10377 gcc_assert (!BYTES_BIG_ENDIAN
10378 && VECTOR_MEM_VSX_P (mode)
10379 && !TARGET_P9_VECTOR
10380 && !gpr_or_gpr_p (dest, source)
10381 && (MEM_P (source) ^ MEM_P (dest)));
10383 if (MEM_P (source))
10385 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10386 rs6000_emit_le_vsx_load (dest, source, mode);
10388 else
10390 if (!REG_P (source))
10391 source = force_reg (mode, source);
10392 rs6000_emit_le_vsx_store (dest, source, mode);
10396 /* Return whether an SFmode or SImode move can be done without converting one
10397 mode to another. This arises when we have:
10399 (SUBREG:SF (REG:SI ...))
10400 (SUBREG:SI (REG:SF ...))
10402 and one of the values is in a floating point/vector register, where SFmode
10403 scalars are stored in DFmode format. */
10405 bool
10406 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10408 if (TARGET_ALLOW_SF_SUBREG)
10409 return true;
10411 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10412 return true;
10414 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10415 return true;
10417 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10418 if (SUBREG_P (dest))
10420 rtx dest_subreg = SUBREG_REG (dest);
10421 rtx src_subreg = SUBREG_REG (src);
10422 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10425 return false;
10429 /* Helper function to change moves with:
10431 (SUBREG:SF (REG:SI)) and
10432 (SUBREG:SI (REG:SF))
10434 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10435 values are stored as DFmode values in the VSX registers. We need to convert
10436 the bits before we can use a direct move or operate on the bits in the
10437 vector register as an integer type.
10439 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
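/* E.g. (set (reg:SI rI) (subreg:SI (reg:SF rF) 0)) is emitted as a
movsi_from_sf pattern so the DFmode-format bits in the VSX register
are converted first (register names illustrative). */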
10441 static bool
10442 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10444 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10445 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10446 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10448 rtx inner_source = SUBREG_REG (source);
10449 machine_mode inner_mode = GET_MODE (inner_source);
10451 if (mode == SImode && inner_mode == SFmode)
10453 emit_insn (gen_movsi_from_sf (dest, inner_source));
10454 return true;
10457 if (mode == SFmode && inner_mode == SImode)
10459 emit_insn (gen_movsf_from_si (dest, inner_source));
10460 return true;
10464 return false;
10467 /* Emit a move from SOURCE to DEST in mode MODE. */
10468 void
10469 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10471 rtx operands[2];
10472 operands[0] = dest;
10473 operands[1] = source;
10475 if (TARGET_DEBUG_ADDR)
10477 fprintf (stderr,
10478 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10479 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10480 GET_MODE_NAME (mode),
10481 lra_in_progress,
10482 reload_completed,
10483 can_create_pseudo_p ());
10484 debug_rtx (dest);
10485 fprintf (stderr, "source:\n");
10486 debug_rtx (source);
10489 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10490 if (CONST_WIDE_INT_P (operands[1])
10491 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10493 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10494 gcc_unreachable ();
10497 #ifdef HAVE_AS_GNU_ATTRIBUTE
10498 /* If we use a long double type, set the flags in .gnu_attribute that say
10499 what the long double type is. This is to allow the linker's warning
10500 message for the wrong long double to be useful, even if the function does
10501 not do a call (for example, doing a 128-bit add on power9 if the long
10502 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
10503 are used and they aren't the default long double type. */
10504 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
10506 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
10507 rs6000_passes_float = rs6000_passes_long_double = true;
10509 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
10510 rs6000_passes_float = rs6000_passes_long_double = true;
10512 #endif
10514 /* See if we need to special case SImode/SFmode SUBREG moves. */
10515 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10516 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10517 return;
10519 /* Check if GCC is setting up a block move that will end up using FP
10520 registers as temporaries. We must make sure this is acceptable. */
10521 if (GET_CODE (operands[0]) == MEM
10522 && GET_CODE (operands[1]) == MEM
10523 && mode == DImode
10524 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10525 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10526 && ! (rs6000_slow_unaligned_access (SImode,
10527 (MEM_ALIGN (operands[0]) > 32
10528 ? 32 : MEM_ALIGN (operands[0])))
10529 || rs6000_slow_unaligned_access (SImode,
10530 (MEM_ALIGN (operands[1]) > 32
10531 ? 32 : MEM_ALIGN (operands[1]))))
10532 && ! MEM_VOLATILE_P (operands [0])
10533 && ! MEM_VOLATILE_P (operands [1]))
10535 emit_move_insn (adjust_address (operands[0], SImode, 0),
10536 adjust_address (operands[1], SImode, 0));
10537 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10538 adjust_address (copy_rtx (operands[1]), SImode, 4));
10539 return;
10542 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10543 && !gpc_reg_operand (operands[1], mode))
10544 operands[1] = force_reg (mode, operands[1]);
10546 /* Recognize the case where operand[1] is a reference to thread-local
10547 data and load its address to a register. */
10548 if (tls_referenced_p (operands[1]))
10550 enum tls_model model;
10551 rtx tmp = operands[1];
10552 rtx addend = NULL;
10554 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10556 addend = XEXP (XEXP (tmp, 0), 1);
10557 tmp = XEXP (XEXP (tmp, 0), 0);
10560 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10561 model = SYMBOL_REF_TLS_MODEL (tmp);
10562 gcc_assert (model != 0);
10564 tmp = rs6000_legitimize_tls_address (tmp, model);
10565 if (addend)
10567 tmp = gen_rtx_PLUS (mode, tmp, addend);
10568 tmp = force_operand (tmp, operands[0]);
10570 operands[1] = tmp;
10573 /* 128-bit constant floating-point values on Darwin should really be loaded
10574 as two parts. However, this premature splitting is a problem when DFmode
10575 values can go into Altivec registers. */
10576 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10577 && GET_CODE (operands[1]) == CONST_DOUBLE)
10579 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10580 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10581 DFmode);
10582 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10583 GET_MODE_SIZE (DFmode)),
10584 simplify_gen_subreg (DFmode, operands[1], mode,
10585 GET_MODE_SIZE (DFmode)),
10586 DFmode);
10587 return;
10590 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10591 p1:SD) if p1 is not of floating point class and p0 is spilled, as
10592 we can have no analogous movsd_store for this. */
10593 if (lra_in_progress && mode == DDmode
10594 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10595 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10596 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10597 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10599 enum reg_class cl;
10600 int regno = REGNO (SUBREG_REG (operands[1]));
10602 if (regno >= FIRST_PSEUDO_REGISTER)
10604 cl = reg_preferred_class (regno);
10605 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10607 if (regno >= 0 && ! FP_REGNO_P (regno))
10609 mode = SDmode;
10610 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10611 operands[1] = SUBREG_REG (operands[1]);
10614 if (lra_in_progress
10615 && mode == SDmode
10616 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10617 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10618 && (REG_P (operands[1])
10619 || (GET_CODE (operands[1]) == SUBREG
10620 && REG_P (SUBREG_REG (operands[1])))))
10622 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10623 ? SUBREG_REG (operands[1]) : operands[1]);
10624 enum reg_class cl;
10626 if (regno >= FIRST_PSEUDO_REGISTER)
10628 cl = reg_preferred_class (regno);
10629 gcc_assert (cl != NO_REGS);
10630 regno = ira_class_hard_regs[cl][0];
10632 if (FP_REGNO_P (regno))
10634 if (GET_MODE (operands[0]) != DDmode)
10635 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10636 emit_insn (gen_movsd_store (operands[0], operands[1]));
10638 else if (INT_REGNO_P (regno))
10639 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10640 else
10641 gcc_unreachable();
10642 return;
10644 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10645 p1:DD)) if p0 is not of floating point class and p1 is spilled, as
10646 we can have no analogous movsd_load for this. */
10647 if (lra_in_progress && mode == DDmode
10648 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10649 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10650 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10651 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10653 enum reg_class cl;
10654 int regno = REGNO (SUBREG_REG (operands[0]));
10656 if (regno >= FIRST_PSEUDO_REGISTER)
10658 cl = reg_preferred_class (regno);
10659 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10661 if (regno >= 0 && ! FP_REGNO_P (regno))
10663 mode = SDmode;
10664 operands[0] = SUBREG_REG (operands[0]);
10665 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10668 if (lra_in_progress
10669 && mode == SDmode
10670 && (REG_P (operands[0])
10671 || (GET_CODE (operands[0]) == SUBREG
10672 && REG_P (SUBREG_REG (operands[0]))))
10673 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10674 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10676 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10677 ? SUBREG_REG (operands[0]) : operands[0]);
10678 enum reg_class cl;
10680 if (regno >= FIRST_PSEUDO_REGISTER)
10682 cl = reg_preferred_class (regno);
10683 gcc_assert (cl != NO_REGS);
10684 regno = ira_class_hard_regs[cl][0];
10686 if (FP_REGNO_P (regno))
10688 if (GET_MODE (operands[1]) != DDmode)
10689 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10690 emit_insn (gen_movsd_load (operands[0], operands[1]));
10692 else if (INT_REGNO_P (regno))
10693 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10694 else
10695 gcc_unreachable();
10696 return;
10699 /* FIXME: In the long term, this switch statement should go away
10700 and be replaced by a sequence of tests based on things like
10701 mode == Pmode. */
10702 switch (mode)
10704 case E_HImode:
10705 case E_QImode:
10706 if (CONSTANT_P (operands[1])
10707 && GET_CODE (operands[1]) != CONST_INT)
10708 operands[1] = force_const_mem (mode, operands[1]);
10709 break;
10711 case E_TFmode:
10712 case E_TDmode:
10713 case E_IFmode:
10714 case E_KFmode:
10715 if (FLOAT128_2REG_P (mode))
10716 rs6000_eliminate_indexed_memrefs (operands);
10717 /* fall through */
10719 case E_DFmode:
10720 case E_DDmode:
10721 case E_SFmode:
10722 case E_SDmode:
10723 if (CONSTANT_P (operands[1])
10724 && ! easy_fp_constant (operands[1], mode))
10725 operands[1] = force_const_mem (mode, operands[1]);
10726 break;
10728 case E_V16QImode:
10729 case E_V8HImode:
10730 case E_V4SFmode:
10731 case E_V4SImode:
10732 case E_V2SFmode:
10733 case E_V2SImode:
10734 case E_V2DFmode:
10735 case E_V2DImode:
10736 case E_V1TImode:
10737 if (CONSTANT_P (operands[1])
10738 && !easy_vector_constant (operands[1], mode))
10739 operands[1] = force_const_mem (mode, operands[1]);
10740 break;
10742 case E_SImode:
10743 case E_DImode:
10744 /* Use the default pattern for the address of ELF small data. */
10745 if (TARGET_ELF
10746 && mode == Pmode
10747 && DEFAULT_ABI == ABI_V4
10748 && (GET_CODE (operands[1]) == SYMBOL_REF
10749 || GET_CODE (operands[1]) == CONST)
10750 && small_data_operand (operands[1], mode))
10752 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10753 return;
10756 if (DEFAULT_ABI == ABI_V4
10757 && mode == Pmode && mode == SImode
10758 && flag_pic == 1 && got_operand (operands[1], mode))
10760 emit_insn (gen_movsi_got (operands[0], operands[1]));
10761 return;
10764 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10765 && TARGET_NO_TOC
10766 && ! flag_pic
10767 && mode == Pmode
10768 && CONSTANT_P (operands[1])
10769 && GET_CODE (operands[1]) != HIGH
10770 && GET_CODE (operands[1]) != CONST_INT)
10772 rtx target = (!can_create_pseudo_p ()
10773 ? operands[0]
10774 : gen_reg_rtx (mode));
10776 /* If this is a function address on -mcall-aixdesc,
10777 convert it to the address of the descriptor. */
10778 if (DEFAULT_ABI == ABI_AIX
10779 && GET_CODE (operands[1]) == SYMBOL_REF
10780 && XSTR (operands[1], 0)[0] == '.')
10782 const char *name = XSTR (operands[1], 0);
10783 rtx new_ref;
10784 while (*name == '.')
10785 name++;
10786 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10787 CONSTANT_POOL_ADDRESS_P (new_ref)
10788 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10789 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10790 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10791 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10792 operands[1] = new_ref;
10795 if (DEFAULT_ABI == ABI_DARWIN)
10797 #if TARGET_MACHO
10798 if (MACHO_DYNAMIC_NO_PIC_P)
10800 /* Take care of any required data indirection. */
10801 operands[1] = rs6000_machopic_legitimize_pic_address (
10802 operands[1], mode, operands[0]);
10803 if (operands[0] != operands[1])
10804 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10805 return;
10807 #endif
10808 emit_insn (gen_macho_high (target, operands[1]));
10809 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10810 return;
10813 emit_insn (gen_elf_high (target, operands[1]));
10814 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10815 return;
10818 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10819 and we have put it in the TOC, we just need to make a TOC-relative
10820 reference to it. */
10821 if (TARGET_TOC
10822 && GET_CODE (operands[1]) == SYMBOL_REF
10823 && use_toc_relative_ref (operands[1], mode))
10824 operands[1] = create_TOC_reference (operands[1], operands[0]);
10825 else if (mode == Pmode
10826 && CONSTANT_P (operands[1])
10827 && GET_CODE (operands[1]) != HIGH
10828 && ((GET_CODE (operands[1]) != CONST_INT
10829 && ! easy_fp_constant (operands[1], mode))
10830 || (GET_CODE (operands[1]) == CONST_INT
10831 && (num_insns_constant (operands[1], mode)
10832 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10833 || (GET_CODE (operands[0]) == REG
10834 && FP_REGNO_P (REGNO (operands[0]))))
10835 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10836 && (TARGET_CMODEL == CMODEL_SMALL
10837 || can_create_pseudo_p ()
10838 || (REG_P (operands[0])
10839 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10842 #if TARGET_MACHO
10843 /* Darwin uses a special PIC legitimizer. */
10844 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10846 operands[1] =
10847 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10848 operands[0]);
10849 if (operands[0] != operands[1])
10850 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10851 return;
10853 #endif
10855 /* If we are to limit the number of things we put in the TOC and
10856 this is a symbol plus a constant we can add in one insn,
10857 just put the symbol in the TOC and add the constant. */
10858 if (GET_CODE (operands[1]) == CONST
10859 && TARGET_NO_SUM_IN_TOC
10860 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10861 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10862 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10863 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10864 && ! side_effects_p (operands[0]))
10866 rtx sym =
10867 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10868 rtx other = XEXP (XEXP (operands[1], 0), 1);
10870 sym = force_reg (mode, sym);
10871 emit_insn (gen_add3_insn (operands[0], sym, other));
10872 return;
10875 operands[1] = force_const_mem (mode, operands[1]);
10877 if (TARGET_TOC
10878 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10879 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10881 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10882 operands[0]);
10883 operands[1] = gen_const_mem (mode, tocref);
10884 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10887 break;
10889 case E_TImode:
10890 if (!VECTOR_MEM_VSX_P (TImode))
10891 rs6000_eliminate_indexed_memrefs (operands);
10892 break;
10894 case E_PTImode:
10895 rs6000_eliminate_indexed_memrefs (operands);
10896 break;
10898 default:
10899 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10902 /* Above, we may have called force_const_mem which may have returned
10903 an invalid address. If we can, fix this up; otherwise, reload will
10904 have to deal with it. */
10905 if (GET_CODE (operands[1]) == MEM)
10906 operands[1] = validize_mem (operands[1]);
10908 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10911 /* Nonzero if we can use a floating-point register to pass this arg. */
10912 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10913 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10914 && (CUM)->fregno <= FP_ARG_MAX_REG \
10915 && TARGET_HARD_FLOAT)
10917 /* Nonzero if we can use an AltiVec register to pass this arg. */
10918 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10919 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10920 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10921 && TARGET_ALTIVEC_ABI \
10922 && (NAMED))
10924 /* Walk down the type tree of TYPE counting consecutive base elements.
10925 If *MODEP is VOIDmode, then set it to the first valid floating point
10926 or vector type. If a non-floating point or vector type is found, or
10927 if a floating point or vector type that doesn't match a non-VOIDmode
10928 *MODEP is found, then return -1, otherwise return the count in the
10929 sub-tree. */
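/* For instance, a type such as struct { double x, y; } (illustrative)
returns 2 with *MODEP set to DFmode, making it a candidate homogeneous
aggregate of two doubles. */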
10931 static int
10932 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10934 machine_mode mode;
10935 HOST_WIDE_INT size;
10937 switch (TREE_CODE (type))
10939 case REAL_TYPE:
10940 mode = TYPE_MODE (type);
10941 if (!SCALAR_FLOAT_MODE_P (mode))
10942 return -1;
10944 if (*modep == VOIDmode)
10945 *modep = mode;
10947 if (*modep == mode)
10948 return 1;
10950 break;
10952 case COMPLEX_TYPE:
10953 mode = TYPE_MODE (TREE_TYPE (type));
10954 if (!SCALAR_FLOAT_MODE_P (mode))
10955 return -1;
10957 if (*modep == VOIDmode)
10958 *modep = mode;
10960 if (*modep == mode)
10961 return 2;
10963 break;
10965 case VECTOR_TYPE:
10966 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10967 return -1;
10969 /* Use V4SImode as representative of all 128-bit vector types. */
10970 size = int_size_in_bytes (type);
10971 switch (size)
10973 case 16:
10974 mode = V4SImode;
10975 break;
10976 default:
10977 return -1;
10980 if (*modep == VOIDmode)
10981 *modep = mode;
10983 /* Vector modes are considered to be opaque: two vectors are
10984 equivalent for the purposes of being homogeneous aggregates
10985 if they are the same size. */
10986 if (*modep == mode)
10987 return 1;
10989 break;
10991 case ARRAY_TYPE:
10993 int count;
10994 tree index = TYPE_DOMAIN (type);
10996 /* Can't handle incomplete types or sizes that are not
10997 fixed. */
10998 if (!COMPLETE_TYPE_P (type)
10999 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11000 return -1;
11002 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11003 if (count == -1
11004 || !index
11005 || !TYPE_MAX_VALUE (index)
11006 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11007 || !TYPE_MIN_VALUE (index)
11008 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11009 || count < 0)
11010 return -1;
11012 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11013 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11015 /* There must be no padding. */
11016 if (wi::to_wide (TYPE_SIZE (type))
11017 != count * GET_MODE_BITSIZE (*modep))
11018 return -1;
11020 return count;
11023 case RECORD_TYPE:
11025 int count = 0;
11026 int sub_count;
11027 tree field;
11029 /* Can't handle incomplete types or sizes that are not
11030 fixed. */
11031 if (!COMPLETE_TYPE_P (type)
11032 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11033 return -1;
11035 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11037 if (TREE_CODE (field) != FIELD_DECL)
11038 continue;
11040 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11041 if (sub_count < 0)
11042 return -1;
11043 count += sub_count;
11046 /* There must be no padding. */
11047 if (wi::to_wide (TYPE_SIZE (type))
11048 != count * GET_MODE_BITSIZE (*modep))
11049 return -1;
11051 return count;
11054 case UNION_TYPE:
11055 case QUAL_UNION_TYPE:
11057 /* These aren't very interesting except in a degenerate case. */
11058 int count = 0;
11059 int sub_count;
11060 tree field;
11062 /* Can't handle incomplete types or sizes that are not
11063 fixed. */
11064 if (!COMPLETE_TYPE_P (type)
11065 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11066 return -1;
11068 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11070 if (TREE_CODE (field) != FIELD_DECL)
11071 continue;
11073 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11074 if (sub_count < 0)
11075 return -1;
11076 count = count > sub_count ? count : sub_count;
11079 /* There must be no padding. */
11080 if (wi::to_wide (TYPE_SIZE (type))
11081 != count * GET_MODE_BITSIZE (*modep))
11082 return -1;
11084 return count;
11087 default:
11088 break;
11091 return -1;
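/* Illustrative sketch (editorial example, not from the GCC sources):
   how the walk above classifies a few hypothetical C types.

       struct hfa2  { double x, y; };            count 2, *MODEP = DFmode
       struct darr  { double a[3]; };            count 3, *MODEP = DFmode
       struct mixed { float f; double d; };      -1 (field modes differ)
       union  u     { double d; double a[2]; };  count 2 (max of members)

   The padding checks mean that a struct with a trailing hole, whose
   TYPE_SIZE exceeds count * GET_MODE_BITSIZE, also returns -1.  */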
11094 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11095 float or vector aggregate that shall be passed in FP/vector registers
11096 according to the ELFv2 ABI, return the homogeneous element mode in
11097 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11099 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11101 static bool
11102 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11103 machine_mode *elt_mode,
11104 int *n_elts)
11106 /* Note that we do not accept complex types at the top level as
11107 homogeneous aggregates; these types are handled via the
11108 targetm.calls.split_complex_arg mechanism. Complex types
11109 can be elements of homogeneous aggregates, however. */
11110 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
11111 && AGGREGATE_TYPE_P (type))
11113 machine_mode field_mode = VOIDmode;
11114 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11116 if (field_count > 0)
11118 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11119 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11121 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11122 up to AGGR_ARG_NUM_REG registers. */
11123 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11125 if (elt_mode)
11126 *elt_mode = field_mode;
11127 if (n_elts)
11128 *n_elts = field_count;
11129 return true;
11134 if (elt_mode)
11135 *elt_mode = mode;
11136 if (n_elts)
11137 *n_elts = 1;
11138 return false;
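/* Worked example (editorial, assuming AGGR_ARG_NUM_REG == 8 as on
   ELFv2): a struct of eight doubles yields field_mode == DFmode,
   field_count == 8, n_regs == 1, and 8 * 1 <= 8, so it is accepted as
   a homogeneous aggregate.  A struct of nine doubles, or of five IBM
   long doubles (n_regs == 2 each), exceeds the limit, and we fall
   back to *ELT_MODE = MODE, *N_ELTS = 1, returning false.  */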
11141 /* Return a nonzero value to say to return the function value in
11142 memory, just as large structures are always returned. TYPE will be
11143 the data type of the value, and FNTYPE will be the type of the
11144 function doing the returning, or @code{NULL} for libcalls.
11146 The AIX ABI for the RS/6000 specifies that all structures are
11147 returned in memory. The Darwin ABI does the same.
11149 For the Darwin 64 Bit ABI, a function result can be returned in
11150 registers or in memory, depending on the size of the return data
11151 type. If it is returned in registers, the value occupies the same
11152 registers as it would if it were the first and only function
11153 argument. Otherwise, the function places its result in memory at
11154 the location pointed to by GPR3.
11156 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11157 but a draft put them in memory, and GCC used to implement the draft
11158 instead of the final standard. Therefore, aix_struct_return
11159 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11160 compatibility can change DRAFT_V4_STRUCT_RET to override the
11161 default, and -m switches get the final word. See
11162 rs6000_option_override_internal for more details.
11164 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11165 long double support is enabled. These values are returned in memory.
11167 int_size_in_bytes returns -1 for variable size objects, which go in
11168 memory always. The cast to unsigned makes -1 > 8. */
11170 static bool
11171 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11173 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11174 if (TARGET_MACHO
11175 && rs6000_darwin64_abi
11176 && TREE_CODE (type) == RECORD_TYPE
11177 && int_size_in_bytes (type) > 0)
11179 CUMULATIVE_ARGS valcum;
11180 rtx valret;
11182 valcum.words = 0;
11183 valcum.fregno = FP_ARG_MIN_REG;
11184 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11185 /* Do a trial code generation as if this were going to be passed
11186 as an argument; if any part goes in memory, we return NULL. */
11187 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11188 if (valret)
11189 return false;
11190 /* Otherwise fall through to more conventional ABI rules. */
11193 /* The ELFv2 ABI returns homogeneous floating-point/vector aggregates in registers. */
11194 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11195 NULL, NULL))
11196 return false;
11198 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
11199 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11200 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11201 return false;
11203 if (AGGREGATE_TYPE_P (type)
11204 && (aix_struct_return
11205 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11206 return true;
11208 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11209 modes only exist for GCC vector types if -maltivec. */
11210 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11211 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11212 return false;
11214 /* Return synthetic vectors in memory. */
11215 if (TREE_CODE (type) == VECTOR_TYPE
11216 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11218 static bool warned_for_return_big_vectors = false;
11219 if (!warned_for_return_big_vectors)
11221 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11222 "non-standard ABI extension with no compatibility "
11223 "guarantee");
11224 warned_for_return_big_vectors = true;
11226 return true;
11229 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11230 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11231 return true;
11233 return false;
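/* Editorial examples of the rules above, for hypothetical types:

       struct { double x, y; }   ELFv2: homogeneous, in FPRs  -> false
       struct { char c[16]; }    ELFv2: <= 16 bytes, in GPRs  -> false
       struct { char c[17]; }    ELFv2: too large             -> true
       struct { int i; }         AIX (aix_struct_return set)  -> true

   Variable-size types have int_size_in_bytes == -1, which the
   unsigned comparisons above force into the "in memory" path.  */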
11236 /* Specify whether values returned in registers should be at the most
11237 significant end of a register. We want aggregates returned by
11238 value to match the way aggregates are passed to functions. */
11240 static bool
11241 rs6000_return_in_msb (const_tree valtype)
11243 return (DEFAULT_ABI == ABI_ELFv2
11244 && BYTES_BIG_ENDIAN
11245 && AGGREGATE_TYPE_P (valtype)
11246 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11247 == PAD_UPWARD));
11250 #ifdef HAVE_AS_GNU_ATTRIBUTE
11251 /* Return TRUE if a call to function FNDECL may be one that
11252 potentially affects the function calling ABI of the object file. */
11254 static bool
11255 call_ABI_of_interest (tree fndecl)
11257 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11259 struct cgraph_node *c_node;
11261 /* Libcalls are always interesting. */
11262 if (fndecl == NULL_TREE)
11263 return true;
11265 /* Any call to an external function is interesting. */
11266 if (DECL_EXTERNAL (fndecl))
11267 return true;
11269 /* Interesting functions that we are emitting in this object file. */
11270 c_node = cgraph_node::get (fndecl);
11271 c_node = c_node->ultimate_alias_target ();
11272 return !c_node->only_called_directly_p ();
11274 return false;
11276 #endif
11278 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11279 for a call to a function whose data type is FNTYPE.
11280 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11282 For incoming args we set the number of prototyped arguments high
11283 enough that we never return a PARALLEL. */
11285 void
11286 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11287 rtx libname ATTRIBUTE_UNUSED, int incoming,
11288 int libcall, int n_named_args,
11289 tree fndecl ATTRIBUTE_UNUSED,
11290 machine_mode return_mode ATTRIBUTE_UNUSED)
11292 static CUMULATIVE_ARGS zero_cumulative;
11294 *cum = zero_cumulative;
11295 cum->words = 0;
11296 cum->fregno = FP_ARG_MIN_REG;
11297 cum->vregno = ALTIVEC_ARG_MIN_REG;
11298 cum->prototype = (fntype && prototype_p (fntype));
11299 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11300 ? CALL_LIBCALL : CALL_NORMAL);
11301 cum->sysv_gregno = GP_ARG_MIN_REG;
11302 cum->stdarg = stdarg_p (fntype);
11303 cum->libcall = libcall;
11305 cum->nargs_prototype = 0;
11306 if (incoming || cum->prototype)
11307 cum->nargs_prototype = n_named_args;
11309 /* Check for a longcall attribute. */
11310 if ((!fntype && rs6000_default_long_calls)
11311 || (fntype
11312 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11313 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11314 cum->call_cookie |= CALL_LONG;
11316 if (TARGET_DEBUG_ARG)
11318 fprintf (stderr, "\ninit_cumulative_args:");
11319 if (fntype)
11321 tree ret_type = TREE_TYPE (fntype);
11322 fprintf (stderr, " ret code = %s,",
11323 get_tree_code_name (TREE_CODE (ret_type)));
11326 if (cum->call_cookie & CALL_LONG)
11327 fprintf (stderr, " longcall,");
11329 fprintf (stderr, " proto = %d, nargs = %d\n",
11330 cum->prototype, cum->nargs_prototype);
11333 #ifdef HAVE_AS_GNU_ATTRIBUTE
11334 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11336 cum->escapes = call_ABI_of_interest (fndecl);
11337 if (cum->escapes)
11339 tree return_type;
11341 if (fntype)
11343 return_type = TREE_TYPE (fntype);
11344 return_mode = TYPE_MODE (return_type);
11346 else
11347 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11349 if (return_type != NULL)
11351 if (TREE_CODE (return_type) == RECORD_TYPE
11352 && TYPE_TRANSPARENT_AGGR (return_type))
11354 return_type = TREE_TYPE (first_field (return_type));
11355 return_mode = TYPE_MODE (return_type);
11357 if (AGGREGATE_TYPE_P (return_type)
11358 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11359 <= 8))
11360 rs6000_returns_struct = true;
11362 if (SCALAR_FLOAT_MODE_P (return_mode))
11364 rs6000_passes_float = true;
11365 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11366 && (FLOAT128_IBM_P (return_mode)
11367 || FLOAT128_IEEE_P (return_mode)
11368 || (return_type != NULL
11369 && (TYPE_MAIN_VARIANT (return_type)
11370 == long_double_type_node))))
11371 rs6000_passes_long_double = true;
11373 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11374 || PAIRED_VECTOR_MODE (return_mode))
11375 rs6000_passes_vector = true;
11378 #endif
11380 if (fntype
11381 && !TARGET_ALTIVEC
11382 && TARGET_ALTIVEC_ABI
11383 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11385 error ("cannot return value in vector register because"
11386 " altivec instructions are disabled, use %qs"
11387 " to enable them", "-maltivec");
11391 /* The mode the ABI uses for a word. This is not the same as word_mode
11392 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11394 static scalar_int_mode
11395 rs6000_abi_word_mode (void)
11397 return TARGET_32BIT ? SImode : DImode;
11400 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11401 static char *
11402 rs6000_offload_options (void)
11404 if (TARGET_64BIT)
11405 return xstrdup ("-foffload-abi=lp64");
11406 else
11407 return xstrdup ("-foffload-abi=ilp32");
11410 /* On rs6000, function arguments are promoted, as are function return
11411 values. */
11413 static machine_mode
11414 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11415 machine_mode mode,
11416 int *punsignedp ATTRIBUTE_UNUSED,
11417 const_tree, int)
11419 PROMOTE_MODE (mode, *punsignedp, type);
11421 return mode;
11424 /* Return true if TYPE must be passed on the stack and not in registers. */
11426 static bool
11427 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11429 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11430 return must_pass_in_stack_var_size (mode, type);
11431 else
11432 return must_pass_in_stack_var_size_or_pad (mode, type);
11435 static inline bool
11436 is_complex_IBM_long_double (machine_mode mode)
11438 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
11441 /* Whether ABI_V4 passes MODE args to a function in floating point
11442 registers. */
11444 static bool
11445 abi_v4_pass_in_fpr (machine_mode mode)
11447 if (!TARGET_HARD_FLOAT)
11448 return false;
11449 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11450 return true;
11451 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11452 return true;
11453 /* ABI_V4 passes complex IBM long double in 8 gprs.
11454 Stupid, but we can't change the ABI now. */
11455 if (is_complex_IBM_long_double (mode))
11456 return false;
11457 if (FLOAT128_2REG_P (mode))
11458 return true;
11459 if (DECIMAL_FLOAT_MODE_P (mode))
11460 return true;
11461 return false;
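/* Editorial summary of the ABI_V4 decision above, by mode (assuming
   TARGET_HARD_FLOAT):

       SFmode                       -> FPR if TARGET_SINGLE_FLOAT
       DFmode                       -> FPR if TARGET_DOUBLE_FLOAT
       ICmode, complex IBM TCmode   -> 8 GPRs, never FPRs
       FLOAT128_2REG_P (IBM 128)    -> FPR pair
       SDmode/DDmode/TDmode         -> FPRs (decimal float)

   e.g. long double with -mlong-double-128 -mabi=ibmlongdouble is
   FLOAT128_2REG_P and occupies two consecutive FPRs.  */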
11464 /* Implement TARGET_FUNCTION_ARG_PADDING.
11466 For the AIX ABI structs are always stored left shifted in their
11467 argument slot. */
11469 static pad_direction
11470 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11472 #ifndef AGGREGATE_PADDING_FIXED
11473 #define AGGREGATE_PADDING_FIXED 0
11474 #endif
11475 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11476 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11477 #endif
11479 if (!AGGREGATE_PADDING_FIXED)
11481 /* GCC used to pass structures of the same size as integer types as
11482 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11483 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11484 passed padded downward, except that -mstrict-align further
11485 muddied the water in that multi-component structures of 2 and 4
11486 bytes in size were passed padded upward.
11488 The following arranges for best compatibility with previous
11489 versions of gcc, but removes the -mstrict-align dependency. */
11490 if (BYTES_BIG_ENDIAN)
11492 HOST_WIDE_INT size = 0;
11494 if (mode == BLKmode)
11496 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11497 size = int_size_in_bytes (type);
11499 else
11500 size = GET_MODE_SIZE (mode);
11502 if (size == 1 || size == 2 || size == 4)
11503 return PAD_DOWNWARD;
11505 return PAD_UPWARD;
11508 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11510 if (type != 0 && AGGREGATE_TYPE_P (type))
11511 return PAD_UPWARD;
11514 /* Fall back to the default. */
11515 return default_function_arg_padding (mode, type);
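/* Editorial examples for a big-endian target, the interesting case
   above:

       short                       size 2 -> PAD_DOWNWARD
       struct { char a, b, c; }    size 3 -> PAD_UPWARD
       int                         size 4 -> PAD_DOWNWARD
       struct { int a; short b; }  size 8 -> PAD_UPWARD

   i.e. objects of scalar-like size sit at the least significant end
   of their slot, matching the old integer-promotion behavior.  */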
11518 /* Implement TARGET_FUNCTION_ARG_BOUNDARY: give the alignment boundary,
11519 in bits, of an argument with the specified mode and type.  Arguments
11520 not handled specially below use PARM_BOUNDARY.
11522 V.4 wants long longs and doubles to be double word aligned. Just
11523 testing the mode size is a boneheaded way to do this as it means
11524 that other types such as complex int are also double word aligned.
11525 However, we're stuck with this because changing the ABI might break
11526 existing library interfaces.
11528 Quadword align Altivec/VSX vectors.
11529 Quadword align large synthetic vector types. */
11531 static unsigned int
11532 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11534 machine_mode elt_mode;
11535 int n_elts;
11537 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11539 if (DEFAULT_ABI == ABI_V4
11540 && (GET_MODE_SIZE (mode) == 8
11541 || (TARGET_HARD_FLOAT
11542 && !is_complex_IBM_long_double (mode)
11543 && FLOAT128_2REG_P (mode))))
11544 return 64;
11545 else if (FLOAT128_VECTOR_P (mode))
11546 return 128;
11547 else if (PAIRED_VECTOR_MODE (mode)
11548 || (type && TREE_CODE (type) == VECTOR_TYPE
11549 && int_size_in_bytes (type) >= 8
11550 && int_size_in_bytes (type) < 16))
11551 return 64;
11552 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11553 || (type && TREE_CODE (type) == VECTOR_TYPE
11554 && int_size_in_bytes (type) >= 16))
11555 return 128;
11557 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11558 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11559 -mcompat-align-parm is used. */
11560 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11561 || DEFAULT_ABI == ABI_ELFv2)
11562 && type && TYPE_ALIGN (type) > 64)
11564 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11565 or homogeneous float/vector aggregates here. We already handled
11566 vector aggregates above, but still need to check for float here. */
11567 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11568 && !SCALAR_FLOAT_MODE_P (elt_mode));
11570 /* We used to check for BLKmode instead of the above aggregate type
11571 check. Warn when this results in any difference to the ABI. */
11572 if (aggregate_p != (mode == BLKmode))
11574 static bool warned;
11575 if (!warned && warn_psabi)
11577 warned = true;
11578 inform (input_location,
11579 "the ABI of passing aggregates with %d-byte alignment"
11580 " has changed in GCC 5",
11581 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11585 if (aggregate_p)
11586 return 128;
11589 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11590 implement the "aggregate type" check as a BLKmode check here; this
11591 means certain aggregate types are in fact not aligned. */
11592 if (TARGET_MACHO && rs6000_darwin64_abi
11593 && mode == BLKmode
11594 && type && TYPE_ALIGN (type) > 64)
11595 return 128;
11597 return PARM_BOUNDARY;
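/* Editorial summary of the boundaries computed above:

       long long / double under ABI_V4              -> 64
       IEEE 128-bit float (FLOAT128_VECTOR_P)       -> 128
       AltiVec/VSX vector, or >= 16-byte vector     -> 128
       aggregate with TYPE_ALIGN > 64 under ELFv2   -> 128
       everything else                              -> PARM_BOUNDARY

   PARM_BOUNDARY is 32 bits for -m32 and 64 for -m64 on this port, so
   the first case only changes anything for 32-bit ABI_V4.  */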
11600 /* The offset in words to the start of the parameter save area. */
11602 static unsigned int
11603 rs6000_parm_offset (void)
11605 return (DEFAULT_ABI == ABI_V4 ? 2
11606 : DEFAULT_ABI == ABI_ELFv2 ? 4
11607 : 6);
11610 /* For a function parm of MODE and TYPE, return the starting word in
11611 the parameter area. NWORDS of the parameter area are already used. */
11613 static unsigned int
11614 rs6000_parm_start (machine_mode mode, const_tree type,
11615 unsigned int nwords)
11617 unsigned int align;
11619 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11620 return nwords + (-(rs6000_parm_offset () + nwords) & align);
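/* Worked example (editorial): for a 16-byte-aligned argument under
   ELFv2 (rs6000_parm_offset () == 4) with 64-bit words and NWORDS == 3
   already used, the boundary is 128 bits, so ALIGN == 1 and

       3 + (-(4 + 3) & 1) == 3 + 1 == 4

   i.e. the argument is bumped to an even doubleword relative to the
   start of the parameter save area.  */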
11623 /* Compute the size (in words) of a function argument. */
11625 static unsigned long
11626 rs6000_arg_size (machine_mode mode, const_tree type)
11628 unsigned long size;
11630 if (mode != BLKmode)
11631 size = GET_MODE_SIZE (mode);
11632 else
11633 size = int_size_in_bytes (type);
11635 if (TARGET_32BIT)
11636 return (size + 3) >> 2;
11637 else
11638 return (size + 7) >> 3;
11641 /* Use this to flush pending int fields. */
11643 static void
11644 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11645 HOST_WIDE_INT bitpos, int final)
11647 unsigned int startbit, endbit;
11648 int intregs, intoffset;
11650 /* Handle the situations where a float is taking up the first half
11651 of the GPR, and the other half is empty (typically due to
11652 alignment restrictions). We can detect this by an 8-byte-aligned
11653 int field, or by seeing that this is the final flush for this
11654 argument. Count the word and continue on. */
11655 if (cum->floats_in_gpr == 1
11656 && (cum->intoffset % 64 == 0
11657 || (cum->intoffset == -1 && final)))
11659 cum->words++;
11660 cum->floats_in_gpr = 0;
11663 if (cum->intoffset == -1)
11664 return;
11666 intoffset = cum->intoffset;
11667 cum->intoffset = -1;
11668 cum->floats_in_gpr = 0;
11670 if (intoffset % BITS_PER_WORD != 0)
11672 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11673 if (!int_mode_for_size (bits, 0).exists ())
11675 /* We couldn't find an appropriate mode, which happens,
11676 e.g., in packed structs when there are 3 bytes to load.
11677 Move intoffset back to the beginning of the word in this
11678 case. */
11679 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11683 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11684 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11685 intregs = (endbit - startbit) / BITS_PER_WORD;
11686 cum->words += intregs;
11687 /* words should be unsigned. */
11688 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11690 int pad = (endbit/BITS_PER_WORD) - cum->words;
11691 cum->words += pad;
11695 /* The darwin64 ABI calls for us to recurse down through structs,
11696 looking for elements passed in registers. Unfortunately, we have
11697 to track int register count here also because of misalignments
11698 in powerpc alignment mode. */
11700 static void
11701 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11702 const_tree type,
11703 HOST_WIDE_INT startbitpos)
11705 tree f;
11707 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11708 if (TREE_CODE (f) == FIELD_DECL)
11710 HOST_WIDE_INT bitpos = startbitpos;
11711 tree ftype = TREE_TYPE (f);
11712 machine_mode mode;
11713 if (ftype == error_mark_node)
11714 continue;
11715 mode = TYPE_MODE (ftype);
11717 if (DECL_SIZE (f) != 0
11718 && tree_fits_uhwi_p (bit_position (f)))
11719 bitpos += int_bit_position (f);
11721 /* ??? FIXME: else assume zero offset. */
11723 if (TREE_CODE (ftype) == RECORD_TYPE)
11724 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11725 else if (USE_FP_FOR_ARG_P (cum, mode))
11727 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11728 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11729 cum->fregno += n_fpregs;
11730 /* Single-precision floats present a special problem for
11731 us, because they are smaller than an 8-byte GPR, and so
11732 the structure-packing rules combined with the standard
11733 varargs behavior mean that we want to pack float/float
11734 and float/int combinations into a single register's
11735 space. This is complicated by the arg advance flushing,
11736 which works on arbitrarily large groups of int-type
11737 fields. */
11738 if (mode == SFmode)
11740 if (cum->floats_in_gpr == 1)
11742 /* Two floats in a word; count the word and reset
11743 the float count. */
11744 cum->words++;
11745 cum->floats_in_gpr = 0;
11747 else if (bitpos % 64 == 0)
11749 /* A float at the beginning of an 8-byte word;
11750 count it and put off adjusting cum->words until
11751 we see if an arg advance flush is going to do it
11752 for us. */
11753 cum->floats_in_gpr++;
11755 else
11757 /* The float is at the end of a word, preceded
11758 by integer fields, so the arg advance flush
11759 just above has already set cum->words and
11760 everything is taken care of. */
11763 else
11764 cum->words += n_fpregs;
11766 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11768 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11769 cum->vregno++;
11770 cum->words += 2;
11772 else if (cum->intoffset == -1)
11773 cum->intoffset = bitpos;
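/* Editorial example of the SFmode packing above: for the Darwin64
   argument  struct { float f; float g; }  the first float sits on a
   64-bit boundary, so floats_in_gpr becomes 1 and cum->words is left
   alone; the second float then hits the floats_in_gpr == 1 case,
   counting a single GPR word for the pair.  cum->fregno still
   advances once per float.  */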
11777 /* Check for an item that needs to be considered specially under the
11778 Darwin 64-bit ABI: record types whose mode is BLKmode, or records
11779 that are exactly 8 bytes in size. */
11780 static int
11781 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11783 return rs6000_darwin64_abi
11784 && ((mode == BLKmode
11785 && TREE_CODE (type) == RECORD_TYPE
11786 && int_size_in_bytes (type) > 0)
11787 || (type && TREE_CODE (type) == RECORD_TYPE
11788 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11791 /* Update the data in CUM to advance over an argument
11792 of mode MODE and data type TYPE.
11793 (TYPE is null for libcalls where that information may not be available.)
11795 Note that for args passed by reference, function_arg will be called
11796 with MODE and TYPE set to that of the pointer to the arg, not the arg
11797 itself. */
11799 static void
11800 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11801 const_tree type, bool named, int depth)
11803 machine_mode elt_mode;
11804 int n_elts;
11806 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11808 /* Only tick off an argument if we're not recursing. */
11809 if (depth == 0)
11810 cum->nargs_prototype--;
11812 #ifdef HAVE_AS_GNU_ATTRIBUTE
11813 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11814 && cum->escapes)
11816 if (SCALAR_FLOAT_MODE_P (mode))
11818 rs6000_passes_float = true;
11819 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11820 && (FLOAT128_IBM_P (mode)
11821 || FLOAT128_IEEE_P (mode)
11822 || (type != NULL
11823 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11824 rs6000_passes_long_double = true;
11826 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11827 || (PAIRED_VECTOR_MODE (mode)
11828 && !cum->stdarg
11829 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11830 rs6000_passes_vector = true;
11832 #endif
11834 if (TARGET_ALTIVEC_ABI
11835 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11836 || (type && TREE_CODE (type) == VECTOR_TYPE
11837 && int_size_in_bytes (type) == 16)))
11839 bool stack = false;
11841 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11843 cum->vregno += n_elts;
11845 if (!TARGET_ALTIVEC)
11846 error ("cannot pass argument in vector register because"
11847 " altivec instructions are disabled, use %qs"
11848 " to enable them", "-maltivec");
11850 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11851 even if it is going to be passed in a vector register.
11852 Darwin does the same for variable-argument functions. */
11853 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11854 && TARGET_64BIT)
11855 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11856 stack = true;
11858 else
11859 stack = true;
11861 if (stack)
11863 int align;
11865 /* Vector parameters must be 16-byte aligned. In 32-bit
11866 mode this means we need to take into account the offset
11867 to the parameter save area. In 64-bit mode, they just
11868 have to start on an even word, since the parameter save
11869 area is 16-byte aligned. */
11870 if (TARGET_32BIT)
11871 align = -(rs6000_parm_offset () + cum->words) & 3;
11872 else
11873 align = cum->words & 1;
11874 cum->words += align + rs6000_arg_size (mode, type);
11876 if (TARGET_DEBUG_ARG)
11878 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11879 cum->words, align);
11880 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11881 cum->nargs_prototype, cum->prototype,
11882 GET_MODE_NAME (mode));
11886 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11888 int size = int_size_in_bytes (type);
11889 /* Variable sized types have size == -1 and are
11890 treated as if consisting entirely of ints.
11891 Pad to 16 byte boundary if needed. */
11892 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11893 && (cum->words % 2) != 0)
11894 cum->words++;
11895 /* For varargs, we can just go up by the size of the struct. */
11896 if (!named)
11897 cum->words += (size + 7) / 8;
11898 else
11900 /* It is tempting to say int register count just goes up by
11901 sizeof(type)/8, but this is wrong in a case such as
11902 { int; double; int; } [powerpc alignment]. We have to
11903 grovel through the fields for these too. */
11904 cum->intoffset = 0;
11905 cum->floats_in_gpr = 0;
11906 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11907 rs6000_darwin64_record_arg_advance_flush (cum,
11908 size * BITS_PER_UNIT, 1);
11910 if (TARGET_DEBUG_ARG)
11912 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11913 cum->words, TYPE_ALIGN (type), size);
11914 fprintf (stderr,
11915 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11916 cum->nargs_prototype, cum->prototype,
11917 GET_MODE_NAME (mode));
11920 else if (DEFAULT_ABI == ABI_V4)
11922 if (abi_v4_pass_in_fpr (mode))
11924 /* _Decimal128 must use an even/odd register pair. This assumes
11925 that the register number is odd when fregno is odd. */
11926 if (mode == TDmode && (cum->fregno % 2) == 1)
11927 cum->fregno++;
11929 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11930 <= FP_ARG_V4_MAX_REG)
11931 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11932 else
11934 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11935 if (mode == DFmode || FLOAT128_IBM_P (mode)
11936 || mode == DDmode || mode == TDmode)
11937 cum->words += cum->words & 1;
11938 cum->words += rs6000_arg_size (mode, type);
11941 else
11943 int n_words = rs6000_arg_size (mode, type);
11944 int gregno = cum->sysv_gregno;
11946 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11947 As does any other 2 word item such as complex int due to a
11948 historical mistake. */
11949 if (n_words == 2)
11950 gregno += (1 - gregno) & 1;
11952 /* Multi-reg args are not split between registers and stack. */
11953 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11955 /* Long long is aligned on the stack. So are other 2 word
11956 items such as complex int due to a historical mistake. */
11957 if (n_words == 2)
11958 cum->words += cum->words & 1;
11959 cum->words += n_words;
11962 /* Note: we continue to accumulate gregno even after we've started
11963 spilling to the stack; this is how expand_builtin_saveregs
11964 detects that spilling has begun. */
11965 cum->sysv_gregno = gregno + n_words;
11968 if (TARGET_DEBUG_ARG)
11970 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11971 cum->words, cum->fregno);
11972 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11973 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11974 fprintf (stderr, "mode = %4s, named = %d\n",
11975 GET_MODE_NAME (mode), named);
11978 else
11980 int n_words = rs6000_arg_size (mode, type);
11981 int start_words = cum->words;
11982 int align_words = rs6000_parm_start (mode, type, start_words);
11984 cum->words = align_words + n_words;
11986 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11988 /* _Decimal128 must be passed in an even/odd float register pair.
11989 This assumes that the register number is odd when fregno is
11990 odd. */
11991 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11992 cum->fregno++;
11993 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11996 if (TARGET_DEBUG_ARG)
11998 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11999 cum->words, cum->fregno);
12000 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12001 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12002 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12003 named, align_words - start_words, depth);
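/* Editorial trace (assuming 64-bit ELFv2 with hard float): advancing
   past a first, named double moves cum->words from 0 to 1 and
   cum->fregno up by one.  A _Decimal128 (TDmode) first rounds
   cum->fregno up to an even value, so that the value lands in an
   even/odd FPR pair, and then advances it by two.  */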
12008 static void
12009 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12010 const_tree type, bool named)
12012 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12016 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12017 structure between cum->intoffset and bitpos to integer registers. */
12019 static void
12020 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12021 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12023 machine_mode mode;
12024 unsigned int regno;
12025 unsigned int startbit, endbit;
12026 int this_regno, intregs, intoffset;
12027 rtx reg;
12029 if (cum->intoffset == -1)
12030 return;
12032 intoffset = cum->intoffset;
12033 cum->intoffset = -1;
12035 /* If this is the trailing part of a word, try to only load that
12036 much into the register. Otherwise load the whole register. Note
12037 that in the latter case we may pick up unwanted bits. It's not a
12038 problem at the moment but may wish to revisit. */
12040 if (intoffset % BITS_PER_WORD != 0)
12042 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12043 if (!int_mode_for_size (bits, 0).exists (&mode))
12045 /* We couldn't find an appropriate mode, which happens,
12046 e.g., in packed structs when there are 3 bytes to load.
12047 Move intoffset back to the beginning of the word in this
12048 case. */
12049 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12050 mode = word_mode;
12053 else
12054 mode = word_mode;
12056 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12057 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12058 intregs = (endbit - startbit) / BITS_PER_WORD;
12059 this_regno = cum->words + intoffset / BITS_PER_WORD;
12061 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12062 cum->use_stack = 1;
12064 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12065 if (intregs <= 0)
12066 return;
12068 intoffset /= BITS_PER_UNIT;
12071 regno = GP_ARG_MIN_REG + this_regno;
12072 reg = gen_rtx_REG (mode, regno);
12073 rvec[(*k)++] =
12074 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12076 this_regno += 1;
12077 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12078 mode = word_mode;
12079 intregs -= 1;
12081 while (intregs > 0);
12084 /* Recursive workhorse for the following. */
12086 static void
12087 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12088 HOST_WIDE_INT startbitpos, rtx rvec[],
12089 int *k)
12091 tree f;
12093 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12094 if (TREE_CODE (f) == FIELD_DECL)
12096 HOST_WIDE_INT bitpos = startbitpos;
12097 tree ftype = TREE_TYPE (f);
12098 machine_mode mode;
12099 if (ftype == error_mark_node)
12100 continue;
12101 mode = TYPE_MODE (ftype);
12103 if (DECL_SIZE (f) != 0
12104 && tree_fits_uhwi_p (bit_position (f)))
12105 bitpos += int_bit_position (f);
12107 /* ??? FIXME: else assume zero offset. */
12109 if (TREE_CODE (ftype) == RECORD_TYPE)
12110 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12111 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12113 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12114 #if 0
12115 switch (mode)
12117 case E_SCmode: mode = SFmode; break;
12118 case E_DCmode: mode = DFmode; break;
12119 case E_TCmode: mode = TFmode; break;
12120 default: break;
12122 #endif
12123 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12124 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12126 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12127 && (mode == TFmode || mode == TDmode));
12128 /* Long double or _Decimal128 split over regs and memory. */
12129 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12130 cum->use_stack = 1;
12132 rvec[(*k)++]
12133 = gen_rtx_EXPR_LIST (VOIDmode,
12134 gen_rtx_REG (mode, cum->fregno++),
12135 GEN_INT (bitpos / BITS_PER_UNIT));
12136 if (FLOAT128_2REG_P (mode))
12137 cum->fregno++;
12139 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12141 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12142 rvec[(*k)++]
12143 = gen_rtx_EXPR_LIST (VOIDmode,
12144 gen_rtx_REG (mode, cum->vregno++),
12145 GEN_INT (bitpos / BITS_PER_UNIT));
12147 else if (cum->intoffset == -1)
12148 cum->intoffset = bitpos;
12152 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12153 the register(s) to be used for each field and subfield of a struct
12154 being passed by value, along with the offset of where the
12155 register's value may be found in the block. FP fields go in FP
12156 register, vector fields go in vector registers, and everything
12157 else goes in int registers, packed as in memory.
12159 This code is also used for function return values. RETVAL indicates
12160 whether this is the case.
12162 Much of this is taken from the SPARC V9 port, which has a similar
12163 calling convention. */
12165 static rtx
12166 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12167 bool named, bool retval)
12169 rtx rvec[FIRST_PSEUDO_REGISTER];
12170 int k = 1, kbase = 1;
12171 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12172 /* This is a copy; modifications are not visible to our caller. */
12173 CUMULATIVE_ARGS copy_cum = *orig_cum;
12174 CUMULATIVE_ARGS *cum = &copy_cum;
12176 /* Pad to 16 byte boundary if needed. */
12177 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12178 && (cum->words % 2) != 0)
12179 cum->words++;
12181 cum->intoffset = 0;
12182 cum->use_stack = 0;
12183 cum->named = named;
12185 /* Put entries into rvec[] for individual FP and vector fields, and
12186 for the chunks of memory that go in int regs. Note we start at
12187 element 1; 0 is reserved for an indication of using memory, and
12188 may or may not be filled in below. */
12189 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12190 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12192 /* If any part of the struct went on the stack put all of it there.
12193 This hack is because the generic code for
12194 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12195 parts of the struct are not at the beginning. */
12196 if (cum->use_stack)
12198 if (retval)
12199 return NULL_RTX; /* doesn't go in registers at all */
12200 kbase = 0;
12201 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12203 if (k > 1 || cum->use_stack)
12204 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12205 else
12206 return NULL_RTX;
12209 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12211 static rtx
12212 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12213 int align_words)
12215 int n_units;
12216 int i, k;
12217 rtx rvec[GP_ARG_NUM_REG + 1];
12219 if (align_words >= GP_ARG_NUM_REG)
12220 return NULL_RTX;
12222 n_units = rs6000_arg_size (mode, type);
12224 /* Optimize the simple case where the arg fits in one gpr, except in
12225 the case of BLKmode due to assign_parms assuming that registers are
12226 BITS_PER_WORD wide. */
12227 if (n_units == 0
12228 || (n_units == 1 && mode != BLKmode))
12229 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12231 k = 0;
12232 if (align_words + n_units > GP_ARG_NUM_REG)
12233 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12234 using a magic NULL_RTX component.
12235 This is not strictly correct. Only some of the arg belongs in
12236 memory, not all of it. However, the normal scheme using
12237 function_arg_partial_nregs can result in unusual subregs, eg.
12238 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12239 store the whole arg to memory is often more efficient than code
12240 to store pieces, and we know that space is available in the right
12241 place for the whole arg. */
12242 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12244 i = 0;
12247 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12248 rtx off = GEN_INT (i++ * 4);
12249 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12251 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12253 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
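/* Editorial example (-m32 -mpowerpc64): a DFmode argument with
   ALIGN_WORDS == 7 has n_units == 2, so the NULL_RTX element records
   a memory portion and one SImode piece goes in r10
   (GP_ARG_MIN_REG + 7), giving roughly

       (parallel:DF [(expr_list (nil) (const_int 0))
                     (expr_list (reg:SI 10) (const_int 0))])

   as the function_arg value.  */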
12256 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12257 but must also be copied into the parameter save area starting at
12258 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12259 to the GPRs and/or memory. Return the number of elements used. */
12261 static int
12262 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12263 int align_words, rtx *rvec)
12265 int k = 0;
12267 if (align_words < GP_ARG_NUM_REG)
12269 int n_words = rs6000_arg_size (mode, type);
12271 if (align_words + n_words > GP_ARG_NUM_REG
12272 || mode == BLKmode
12273 || (TARGET_32BIT && TARGET_POWERPC64))
12275 /* If this is partially on the stack, then we only
12276 include the portion actually in registers here. */
12277 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12278 int i = 0;
12280 if (align_words + n_words > GP_ARG_NUM_REG)
12282 /* Not all of the arg fits in gprs. Say that it goes in memory
12283 too, using a magic NULL_RTX component. Also see comment in
12284 rs6000_mixed_function_arg for why the normal
12285 function_arg_partial_nregs scheme doesn't work in this case. */
12286 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12291 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12292 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12293 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12295 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12297 else
12299 /* The whole arg fits in gprs. */
12300 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12301 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12304 else
12306 /* It's entirely in memory. */
12307 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12310 return k;
12313 /* RVEC is a vector of K components of an argument of mode MODE.
12314 Construct the final function_arg return value from it. */
12316 static rtx
12317 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12319 gcc_assert (k >= 1);
12321 /* Avoid returning a PARALLEL in the trivial cases. */
12322 if (k == 1)
12324 if (XEXP (rvec[0], 0) == NULL_RTX)
12325 return NULL_RTX;
12327 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12328 return XEXP (rvec[0], 0);
12331 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
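/* Editorial note on the K == 1 fast paths above: a double that fits
   entirely in one FPR comes back as a bare (reg:DF 33) rather than a
   one-element PARALLEL, and a lone NULL_RTX element collapses to
   NULL_RTX, meaning "entirely in memory".  Only genuinely multi-part
   arguments keep the PARALLEL form, whose first element may be
   (expr_list (nil) (const_int 0)) to flag a memory portion.  */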
12334 /* Determine where to put an argument to a function.
12335 Value is zero to push the argument on the stack,
12336 or a hard register in which to store the argument.
12338 MODE is the argument's machine mode.
12339 TYPE is the data type of the argument (as a tree).
12340 This is null for libcalls where that information may
12341 not be available.
12342 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12343 the preceding args and about the function being called. It is
12344 not modified in this routine.
12345 NAMED is nonzero if this argument is a named parameter
12346 (otherwise it is an extra parameter matching an ellipsis).
12348 On RS/6000 the first eight words of non-FP are normally in registers
12349 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12350 Under V.4, the first 8 FP args are in registers.
12352 If this is floating-point and no prototype is specified, we use
12353 both an FP and integer register (or possibly FP reg and stack). Library
12354 functions (when CALL_LIBCALL is set) always have the proper types for args,
12355 so we can pass the FP value just in one register. emit_library_call
12356 doesn't support PARALLEL anyway.
12358 Note that for args passed by reference, function_arg will be called
12359 with MODE and TYPE set to that of the pointer to the arg, not the arg
12360 itself. */
12362 static rtx
12363 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12364 const_tree type, bool named)
12366 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12367 enum rs6000_abi abi = DEFAULT_ABI;
12368 machine_mode elt_mode;
12369 int n_elts;
12371 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
12372 to say fp args were passed in registers needs to be set or cleared.
12373 Assume that we don't need the marker for software floating point,
12374 or compiler generated library calls. */
12375 if (mode == VOIDmode)
12377 if (abi == ABI_V4
12378 && (cum->call_cookie & CALL_LIBCALL) == 0
12379 && (cum->stdarg
12380 || (cum->nargs_prototype < 0
12381 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12382 && TARGET_HARD_FLOAT)
12383 return GEN_INT (cum->call_cookie
12384 | ((cum->fregno == FP_ARG_MIN_REG)
12385 ? CALL_V4_SET_FP_ARGS
12386 : CALL_V4_CLEAR_FP_ARGS));
12388 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12391 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12393 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12395 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12396 if (rslt != NULL_RTX)
12397 return rslt;
12398 /* Else fall through to usual handling. */
12401 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12403 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12404 rtx r, off;
12405 int i, k = 0;
12407 /* Do we also need to pass this argument in the parameter save area?
12408 Library support functions for IEEE 128-bit are assumed to not need the
12409 value passed both in GPRs and in vector registers. */
12410 if (TARGET_64BIT && !cum->prototype
12411 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12413 int align_words = ROUND_UP (cum->words, 2);
12414 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12417 /* Describe where this argument goes in the vector registers. */
12418 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12420 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12421 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12422 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12425 return rs6000_finish_function_arg (mode, rvec, k);
12427 else if (TARGET_ALTIVEC_ABI
12428 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12429 || (type && TREE_CODE (type) == VECTOR_TYPE
12430 && int_size_in_bytes (type) == 16)))
12432 if (named || abi == ABI_V4)
12433 return NULL_RTX;
12434 else
12436 /* Vector parameters to varargs functions under AIX or Darwin
12437 get passed in memory and possibly also in GPRs. */
12438 int align, align_words, n_words;
12439 machine_mode part_mode;
12441 /* Vector parameters must be 16-byte aligned. In 32-bit
12442 mode this means we need to take into account the offset
12443 to the parameter save area. In 64-bit mode, they just
12444 have to start on an even word, since the parameter save
12445 area is 16-byte aligned. */
12446 if (TARGET_32BIT)
12447 align = -(rs6000_parm_offset () + cum->words) & 3;
12448 else
12449 align = cum->words & 1;
12450 align_words = cum->words + align;
12452 /* Out of registers? Memory, then. */
12453 if (align_words >= GP_ARG_NUM_REG)
12454 return NULL_RTX;
12456 if (TARGET_32BIT && TARGET_POWERPC64)
12457 return rs6000_mixed_function_arg (mode, type, align_words);
12459 /* The vector value goes in GPRs. Only the part of the
12460 value in GPRs is reported here. */
12461 part_mode = mode;
12462 n_words = rs6000_arg_size (mode, type);
12463 if (align_words + n_words > GP_ARG_NUM_REG)
12464 /* Fortunately, there are only two possibilities, the value
12465 is either wholly in GPRs or half in GPRs and half not. */
12466 part_mode = DImode;
12468 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12472 else if (abi == ABI_V4)
12474 if (abi_v4_pass_in_fpr (mode))
12476 /* _Decimal128 must use an even/odd register pair. This assumes
12477 that the register number is odd when fregno is odd. */
12478 if (mode == TDmode && (cum->fregno % 2) == 1)
12479 cum->fregno++;
12481 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12482 <= FP_ARG_V4_MAX_REG)
12483 return gen_rtx_REG (mode, cum->fregno);
12484 else
12485 return NULL_RTX;
12487 else
12489 int n_words = rs6000_arg_size (mode, type);
12490 int gregno = cum->sysv_gregno;
12492 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12493 As does any other 2 word item such as complex int due to a
12494 historical mistake. */
12495 if (n_words == 2)
12496 gregno += (1 - gregno) & 1;
12498 /* Multi-reg args are not split between registers and stack. */
12499 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12500 return NULL_RTX;
12502 if (TARGET_32BIT && TARGET_POWERPC64)
12503 return rs6000_mixed_function_arg (mode, type,
12504 gregno - GP_ARG_MIN_REG);
12505 return gen_rtx_REG (mode, gregno);
12508 else
12510 int align_words = rs6000_parm_start (mode, type, cum->words);
12512 /* _Decimal128 must be passed in an even/odd float register pair.
12513 This assumes that the register number is odd when fregno is odd. */
12514 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12515 cum->fregno++;
12517 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12519 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12520 rtx r, off;
12521 int i, k = 0;
12522 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12523 int fpr_words;
12525 /* Do we also need to pass this argument in the parameter
12526 save area? */
12527 if (type && (cum->nargs_prototype <= 0
12528 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12529 && TARGET_XL_COMPAT
12530 && align_words >= GP_ARG_NUM_REG)))
12531 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12533 /* Describe where this argument goes in the fprs. */
12534 for (i = 0; i < n_elts
12535 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12537 /* Check if the argument is split over registers and memory.
12538 This can only ever happen for long double or _Decimal128;
12539 complex types are handled via split_complex_arg. */
12540 machine_mode fmode = elt_mode;
12541 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12543 gcc_assert (FLOAT128_2REG_P (fmode));
12544 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12547 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12548 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12549 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12552 /* If there were not enough FPRs to hold the argument, the rest
12553 usually goes into memory. However, if the current position
12554 is still within the register parameter area, a portion may
12555 actually have to go into GPRs.
12557 Note that it may happen that the portion of the argument
12558 passed in the first "half" of the first GPR was already
12559 passed in the last FPR as well.
12561 For unnamed arguments, we already set up GPRs to cover the
12562 whole argument in rs6000_psave_function_arg, so there is
12563 nothing further to do at this point. */
12564 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12565 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12566 && cum->nargs_prototype > 0)
12568 static bool warned;
12570 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12571 int n_words = rs6000_arg_size (mode, type);
12573 align_words += fpr_words;
12574 n_words -= fpr_words;
12578 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12579 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12580 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12582 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12584 if (!warned && warn_psabi)
12586 warned = true;
12587 inform (input_location,
12588 "the ABI of passing homogeneous float aggregates"
12589 " has changed in GCC 5");
12593 return rs6000_finish_function_arg (mode, rvec, k);
12595 else if (align_words < GP_ARG_NUM_REG)
12597 if (TARGET_32BIT && TARGET_POWERPC64)
12598 return rs6000_mixed_function_arg (mode, type, align_words);
12600 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12602 else
12603 return NULL_RTX;
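/* Editorial end-to-end example (assuming 64-bit ELFv2, hard float,
   and a prototyped call): a first, named argument of hypothetical
   type  struct { double x, y; }  takes the USE_FP_FOR_ARG_P path and
   yields, with FP_ARG_MIN_REG == 33 (f1),

       (parallel:BLK [(expr_list (reg:DF 33) (const_int 0))
                      (expr_list (reg:DF 34) (const_int 8))])

   i.e. f1 and f2 tagged with their byte offsets into the struct; no
   GPR copy is made because nargs_prototype is positive.  */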
12607 /* For an arg passed partly in registers and partly in memory, this is
12608 the number of bytes passed in registers. For args passed entirely in
12609 registers or entirely in memory, zero. When an arg is described by a
12610 PARALLEL, perhaps using more than one register type, this function
12611 returns the number of bytes used by the first element of the PARALLEL. */
12613 static int
12614 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12615 tree type, bool named)
12617 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12618 bool passed_in_gprs = true;
12619 int ret = 0;
12620 int align_words;
12621 machine_mode elt_mode;
12622 int n_elts;
12624 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12626 if (DEFAULT_ABI == ABI_V4)
12627 return 0;
12629 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12631 /* If we are passing this arg in the fixed parameter save area (gprs or
12632 memory) as well as VRs, we do not use the partial bytes mechanism;
12633 instead, rs6000_function_arg will return a PARALLEL including a memory
12634 element as necessary. Library support functions for IEEE 128-bit are
12635 assumed to not need the value passed both in GPRs and in vector
12636 registers. */
12637 if (TARGET_64BIT && !cum->prototype
12638 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12639 return 0;
12641 /* Otherwise, we pass in VRs only. Check for partial copies. */
12642 passed_in_gprs = false;
12643 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12644 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12647 /* In this complicated case we just disable the partial_nregs code. */
12648 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12649 return 0;
12651 align_words = rs6000_parm_start (mode, type, cum->words);
12653 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12655 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12657 /* If we are passing this arg in the fixed parameter save area
12658 (gprs or memory) as well as FPRs, we do not use the partial
12659 bytes mechanism; instead, rs6000_function_arg will return a
12660 PARALLEL including a memory element as necessary. */
12661 if (type
12662 && (cum->nargs_prototype <= 0
12663 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12664 && TARGET_XL_COMPAT
12665 && align_words >= GP_ARG_NUM_REG)))
12666 return 0;
12668 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12669 passed_in_gprs = false;
12670 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12672 /* Compute number of bytes / words passed in FPRs. If there
12673 is still space available in the register parameter area
12674 *after* that amount, a part of the argument will be passed
12675 in GPRs. In that case, the total amount passed in any
12676 registers is equal to the amount that would have been passed
12677 in GPRs if everything were passed there, so we fall back to
12678 the GPR code below to compute the appropriate value. */
12679 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12680 * MIN (8, GET_MODE_SIZE (elt_mode)));
12681 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12683 if (align_words + fpr_words < GP_ARG_NUM_REG)
12684 passed_in_gprs = true;
12685 else
12686 ret = fpr;
12690 if (passed_in_gprs
12691 && align_words < GP_ARG_NUM_REG
12692 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12693 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12695 if (ret != 0 && TARGET_DEBUG_ARG)
12696 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12698 return ret;
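/* Worked example (editorial, assuming 64-bit ELFv2 with
   FP_ARG_MAX_REG == 45): a homogeneous aggregate of four doubles
   arriving with cum->fregno == 44 has only two FPRs left, so
   fpr == 2 * 8 == 16 bytes.  If align_words + 2 is still below
   GP_ARG_NUM_REG, the tail is passed in GPRs and the GPR computation
   below determines the result instead.  */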
12701 /* A C expression that indicates when an argument must be passed by
12702 reference. If nonzero for an argument, a copy of that argument is
12703 made in memory and a pointer to the argument is passed instead of
12704 the argument itself. The pointer is passed in whatever way is
12705 appropriate for passing a pointer to that type.
12707 Under V.4, aggregates and IEEE 128-bit long double are passed by reference.
12709 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12710 reference unless the AltiVec vector extension ABI is in force.
12712 As an extension to all ABIs, variable sized types are passed by
12713 reference. */
12715 static bool
12716 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12717 machine_mode mode, const_tree type,
12718 bool named ATTRIBUTE_UNUSED)
12720 if (!type)
12721 return 0;
12723 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12724 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12726 if (TARGET_DEBUG_ARG)
12727 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12728 return 1;
12731 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12733 if (TARGET_DEBUG_ARG)
12734 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12735 return 1;
12738 if (int_size_in_bytes (type) < 0)
12740 if (TARGET_DEBUG_ARG)
12741 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12742 return 1;
12745 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12746 modes only exist for GCC vector types if -maltivec. */
12747 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12749 if (TARGET_DEBUG_ARG)
12750 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12751 return 1;
12754 /* Pass synthetic vectors in memory. */
12755 if (TREE_CODE (type) == VECTOR_TYPE
12756 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12758 static bool warned_for_pass_big_vectors = false;
12759 if (TARGET_DEBUG_ARG)
12760 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12761 if (!warned_for_pass_big_vectors)
12763 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12764 "non-standard ABI extension with no compatibility "
12765 "guarantee");
12766 warned_for_pass_big_vectors = true;
12768 return 1;
12771 return 0;
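/* Illustrative sketch, not part of GCC: under the 32-bit SVR4 (V.4) rules
   tested above, an aggregate argument travels as a hidden pointer:

     struct point { int x, y; };
     void plot (struct point p);   // callee actually receives &p

   The caller makes a copy of P in memory and passes its address, whereas a
   scalar such as an int is still passed by value in a GPR.  */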
12774 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters have
12775 already been processed. Return true if the parameter must be passed
12776 (fully or partially) on the stack. */
12778 static bool
12779 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12781 machine_mode mode;
12782 int unsignedp;
12783 rtx entry_parm;
12785 /* Catch errors. */
12786 if (type == NULL || type == error_mark_node)
12787 return true;
12789 /* Handle types with no storage requirement. */
12790 if (TYPE_MODE (type) == VOIDmode)
12791 return false;
12793 /* Handle complex types: check (and advance past) both the real and imaginary parts. */
12794 if (TREE_CODE (type) == COMPLEX_TYPE)
12795 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12796 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12798 /* Handle transparent aggregates. */
12799 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12800 && TYPE_TRANSPARENT_AGGR (type))
12801 type = TREE_TYPE (first_field (type));
12803 /* See if this arg was passed by invisible reference. */
12804 if (pass_by_reference (get_cumulative_args (args_so_far),
12805 TYPE_MODE (type), type, true))
12806 type = build_pointer_type (type);
12808 /* Find mode as it is passed by the ABI. */
12809 unsignedp = TYPE_UNSIGNED (type);
12810 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12812 /* If we must pass in stack, we need a stack. */
12813 if (rs6000_must_pass_in_stack (mode, type))
12814 return true;
12816 /* If there is no incoming register, we need a stack. */
12817 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12818 if (entry_parm == NULL)
12819 return true;
12821 /* Likewise if we need to pass both in registers and on the stack. */
12822 if (GET_CODE (entry_parm) == PARALLEL
12823 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12824 return true;
12826 /* Also true if we're partially in registers and partially not. */
12827 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12828 return true;
12830 /* Update info on where next arg arrives in registers. */
12831 rs6000_function_arg_advance (args_so_far, mode, type, true);
12832 return false;
12835 /* Return true if FUN has no prototype, has a variable argument
12836 list, or passes any parameter in memory. */
12838 static bool
12839 rs6000_function_parms_need_stack (tree fun, bool incoming)
12841 tree fntype, result;
12842 CUMULATIVE_ARGS args_so_far_v;
12843 cumulative_args_t args_so_far;
12845 if (!fun)
12846 /* Must be a libcall; libcalls pass their parameters only in registers. */
12847 return false;
12849 fntype = fun;
12850 if (!TYPE_P (fun))
12851 fntype = TREE_TYPE (fun);
12853 /* Varargs functions need the parameter save area. */
12854 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12855 return true;
12857 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12858 args_so_far = pack_cumulative_args (&args_so_far_v);
12860 /* When incoming, we will have been passed the function decl.
12861 It is necessary to use the decl to handle K&R style functions,
12862 where TYPE_ARG_TYPES may not be available. */
12863 if (incoming)
12865 gcc_assert (DECL_P (fun));
12866 result = DECL_RESULT (fun);
12868 else
12869 result = TREE_TYPE (fntype);
12871 if (result && aggregate_value_p (result, fntype))
12873 if (!TYPE_P (result))
12874 result = TREE_TYPE (result);
12875 result = build_pointer_type (result);
12876 rs6000_parm_needs_stack (args_so_far, result);
12879 if (incoming)
12881 tree parm;
12883 for (parm = DECL_ARGUMENTS (fun);
12884 parm && parm != void_list_node;
12885 parm = TREE_CHAIN (parm))
12886 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12887 return true;
12889 else
12891 function_args_iterator args_iter;
12892 tree arg_type;
12894 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12895 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12896 return true;
12899 return false;
12902 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12903 usually a constant depending on the ABI. However, in the ELFv2 ABI
12904 the register parameter area is optional when calling a function that
12905 has a prototype in scope, has no variable argument list, and passes
12906 all parameters in registers. */
12908 int
12909 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12911 int reg_parm_stack_space;
12913 switch (DEFAULT_ABI)
12915 default:
12916 reg_parm_stack_space = 0;
12917 break;
12919 case ABI_AIX:
12920 case ABI_DARWIN:
12921 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12922 break;
12924 case ABI_ELFv2:
12925 /* ??? Recomputing this every time is a bit expensive. Is there
12926 a place to cache this information? */
12927 if (rs6000_function_parms_need_stack (fun, incoming))
12928 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12929 else
12930 reg_parm_stack_space = 0;
12931 break;
12934 return reg_parm_stack_space;
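/* Illustrative sketch, not part of GCC: the ELFv2 case above means that for

     int f (int a, int b);           // prototyped, both args in GPRs
     int g (const char *fmt, ...);   // varargs

   a 64-bit call to F reserves no register parameter save area at all, while
   a call to G (or to any unprototyped function) reserves the full 64 bytes.  */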
12937 static void
12938 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12940 int i;
12941 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12943 if (nregs == 0)
12944 return;
12946 for (i = 0; i < nregs; i++)
12948 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12949 if (reload_completed)
12951 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12952 tem = NULL_RTX;
12953 else
12954 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12955 i * GET_MODE_SIZE (reg_mode));
12957 else
12958 tem = replace_equiv_address (tem, XEXP (tem, 0));
12960 gcc_assert (tem);
12962 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12966 /* Perform any actions needed for a function that is receiving a
12967 variable number of arguments.
12969 CUM is as above.
12971 MODE and TYPE are the mode and type of the current parameter.
12973 PRETEND_SIZE is a variable that should be set to the amount of stack
12974 that must be pushed by the prolog to pretend that our caller pushed
12975 it.
12977 Normally, this macro will push all remaining incoming registers on the
12978 stack and set PRETEND_SIZE to the length of the registers pushed. */
12980 static void
12981 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12982 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12983 int no_rtl)
12985 CUMULATIVE_ARGS next_cum;
12986 int reg_size = TARGET_32BIT ? 4 : 8;
12987 rtx save_area = NULL_RTX, mem;
12988 int first_reg_offset;
12989 alias_set_type set;
12991 /* Skip the last named argument. */
12992 next_cum = *get_cumulative_args (cum);
12993 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12995 if (DEFAULT_ABI == ABI_V4)
12997 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12999 if (! no_rtl)
13001 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13002 HOST_WIDE_INT offset = 0;
13004 /* Try to optimize the size of the varargs save area.
13005 The ABI requires that ap.reg_save_area is doubleword
13006 aligned, but we don't need to allocate space for all
13007 the bytes, only for those to which we will actually save
13008 anything. */
13009 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13010 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13011 if (TARGET_HARD_FLOAT
13012 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13013 && cfun->va_list_fpr_size)
13015 if (gpr_reg_num)
13016 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13017 * UNITS_PER_FP_WORD;
13018 if (cfun->va_list_fpr_size
13019 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13020 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13021 else
13022 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13023 * UNITS_PER_FP_WORD;
13025 if (gpr_reg_num)
13027 offset = -((first_reg_offset * reg_size) & ~7);
13028 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13030 gpr_reg_num = cfun->va_list_gpr_size;
13031 if (reg_size == 4 && (first_reg_offset & 1))
13032 gpr_reg_num++;
13034 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13036 else if (fpr_size)
13037 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13038 * UNITS_PER_FP_WORD
13039 - (int) (GP_ARG_NUM_REG * reg_size);
13041 if (gpr_size + fpr_size)
13043 rtx reg_save_area
13044 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13045 gcc_assert (GET_CODE (reg_save_area) == MEM);
13046 reg_save_area = XEXP (reg_save_area, 0);
13047 if (GET_CODE (reg_save_area) == PLUS)
13049 gcc_assert (XEXP (reg_save_area, 0)
13050 == virtual_stack_vars_rtx);
13051 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13052 offset += INTVAL (XEXP (reg_save_area, 1));
13054 else
13055 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13058 cfun->machine->varargs_save_offset = offset;
13059 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13062 else
13064 first_reg_offset = next_cum.words;
13065 save_area = crtl->args.internal_arg_pointer;
13067 if (targetm.calls.must_pass_in_stack (mode, type))
13068 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13071 set = get_varargs_alias_set ();
13072 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13073 && cfun->va_list_gpr_size)
13075 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13077 if (va_list_gpr_counter_field)
13078 /* V4 va_list_gpr_size counts number of registers needed. */
13079 n_gpr = cfun->va_list_gpr_size;
13080 else
13081 /* char * va_list instead counts number of bytes needed. */
13082 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13084 if (nregs > n_gpr)
13085 nregs = n_gpr;
13087 mem = gen_rtx_MEM (BLKmode,
13088 plus_constant (Pmode, save_area,
13089 first_reg_offset * reg_size));
13090 MEM_NOTRAP_P (mem) = 1;
13091 set_mem_alias_set (mem, set);
13092 set_mem_align (mem, BITS_PER_WORD);
13094 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13095 nregs);
13098 /* Save FP registers if needed. */
13099 if (DEFAULT_ABI == ABI_V4
13100 && TARGET_HARD_FLOAT
13101 && ! no_rtl
13102 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13103 && cfun->va_list_fpr_size)
13105 int fregno = next_cum.fregno, nregs;
13106 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13107 rtx lab = gen_label_rtx ();
13108 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13109 * UNITS_PER_FP_WORD);
13111 emit_jump_insn
13112 (gen_rtx_SET (pc_rtx,
13113 gen_rtx_IF_THEN_ELSE (VOIDmode,
13114 gen_rtx_NE (VOIDmode, cr1,
13115 const0_rtx),
13116 gen_rtx_LABEL_REF (VOIDmode, lab),
13117 pc_rtx)));
13119 for (nregs = 0;
13120 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13121 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13123 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13124 ? DFmode : SFmode,
13125 plus_constant (Pmode, save_area, off));
13126 MEM_NOTRAP_P (mem) = 1;
13127 set_mem_alias_set (mem, set);
13128 set_mem_align (mem, GET_MODE_ALIGNMENT (
13129 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13130 ? DFmode : SFmode));
13131 emit_move_insn (mem, gen_rtx_REG (
13132 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13133 ? DFmode : SFmode, fregno));
13136 emit_label (lab);
13140 /* Create the va_list data type. */
13142 static tree
13143 rs6000_build_builtin_va_list (void)
13145 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13147 /* For AIX, prefer 'char *' because that's what the system
13148 header files like. */
13149 if (DEFAULT_ABI != ABI_V4)
13150 return build_pointer_type (char_type_node);
13152 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13153 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13154 get_identifier ("__va_list_tag"), record);
13156 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13157 unsigned_char_type_node);
13158 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13159 unsigned_char_type_node);
13160 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13161 every user file. */
13162 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13163 get_identifier ("reserved"), short_unsigned_type_node);
13164 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13165 get_identifier ("overflow_arg_area"),
13166 ptr_type_node);
13167 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13168 get_identifier ("reg_save_area"),
13169 ptr_type_node);
13171 va_list_gpr_counter_field = f_gpr;
13172 va_list_fpr_counter_field = f_fpr;
13174 DECL_FIELD_CONTEXT (f_gpr) = record;
13175 DECL_FIELD_CONTEXT (f_fpr) = record;
13176 DECL_FIELD_CONTEXT (f_res) = record;
13177 DECL_FIELD_CONTEXT (f_ovf) = record;
13178 DECL_FIELD_CONTEXT (f_sav) = record;
13180 TYPE_STUB_DECL (record) = type_decl;
13181 TYPE_NAME (record) = type_decl;
13182 TYPE_FIELDS (record) = f_gpr;
13183 DECL_CHAIN (f_gpr) = f_fpr;
13184 DECL_CHAIN (f_fpr) = f_res;
13185 DECL_CHAIN (f_res) = f_ovf;
13186 DECL_CHAIN (f_ovf) = f_sav;
13188 layout_type (record);
13190 /* The correct type is an array type of one element. */
13191 return build_array_type (record, build_index_type (size_zero_node));
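/* Illustrative sketch, not part of GCC: the record built above corresponds
   roughly to the V.4 declaration

     typedef struct __va_list_tag {
       unsigned char gpr;          // number of GPR argument registers used
       unsigned char fpr;          // number of FPR argument registers used
       unsigned short reserved;    // padding, named only to quiet -Wpadded
       void *overflow_arg_area;    // next argument on the stack
       void *reg_save_area;        // base of the saved r3..r10 / f1..f8 block
     } va_list[1];

   The array-of-one-element type makes va_list decay to a pointer when it is
   passed to another function, which is what the ABI expects.  */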
13194 /* Implement va_start. */
13196 static void
13197 rs6000_va_start (tree valist, rtx nextarg)
13199 HOST_WIDE_INT words, n_gpr, n_fpr;
13200 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13201 tree gpr, fpr, ovf, sav, t;
13203 /* Only SVR4 needs something special. */
13204 if (DEFAULT_ABI != ABI_V4)
13206 std_expand_builtin_va_start (valist, nextarg);
13207 return;
13210 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13211 f_fpr = DECL_CHAIN (f_gpr);
13212 f_res = DECL_CHAIN (f_fpr);
13213 f_ovf = DECL_CHAIN (f_res);
13214 f_sav = DECL_CHAIN (f_ovf);
13216 valist = build_simple_mem_ref (valist);
13217 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13218 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13219 f_fpr, NULL_TREE);
13220 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13221 f_ovf, NULL_TREE);
13222 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13223 f_sav, NULL_TREE);
13225 /* Count number of gp and fp argument registers used. */
13226 words = crtl->args.info.words;
13227 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13228 GP_ARG_NUM_REG);
13229 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13230 FP_ARG_NUM_REG);
13232 if (TARGET_DEBUG_ARG)
13233 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13234 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13235 words, n_gpr, n_fpr);
13237 if (cfun->va_list_gpr_size)
13239 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13240 build_int_cst (NULL_TREE, n_gpr));
13241 TREE_SIDE_EFFECTS (t) = 1;
13242 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13245 if (cfun->va_list_fpr_size)
13247 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13248 build_int_cst (NULL_TREE, n_fpr));
13249 TREE_SIDE_EFFECTS (t) = 1;
13250 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13252 #ifdef HAVE_AS_GNU_ATTRIBUTE
13253 if (call_ABI_of_interest (cfun->decl))
13254 rs6000_passes_float = true;
13255 #endif
13258 /* Find the overflow area. */
13259 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13260 if (words != 0)
13261 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13262 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13263 TREE_SIDE_EFFECTS (t) = 1;
13264 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13266 /* If there were no va_arg invocations, don't set up the register
13267 save area. */
13268 if (!cfun->va_list_gpr_size
13269 && !cfun->va_list_fpr_size
13270 && n_gpr < GP_ARG_NUM_REG
13271 && n_fpr < FP_ARG_V4_MAX_REG)
13272 return;
13274 /* Find the register save area. */
13275 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13276 if (cfun->machine->varargs_save_offset)
13277 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13278 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13279 TREE_SIDE_EFFECTS (t) = 1;
13280 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
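/* Illustrative sketch, not part of GCC: for a V.4 function such as

     void log_it (int level, double scale, ...);

   LEVEL consumes r3 and SCALE consumes f1, so the code above sets gpr = 1
   and fpr = 1, points overflow_arg_area just past any named arguments that
   landed on the stack, and points reg_save_area at the block spilled by the
   varargs prologue set up in setup_incoming_varargs.  */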
13283 /* Implement va_arg. */
13285 static tree
13286 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13287 gimple_seq *post_p)
13289 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13290 tree gpr, fpr, ovf, sav, reg, t, u;
13291 int size, rsize, n_reg, sav_ofs, sav_scale;
13292 tree lab_false, lab_over, addr;
13293 int align;
13294 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13295 int regalign = 0;
13296 gimple *stmt;
13298 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13300 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13301 return build_va_arg_indirect_ref (t);
13304 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13305 earlier version of gcc, with the property that it always applied alignment
13306 adjustments to the va-args (even for zero-sized types). The cheapest way
13307 to deal with this is to replicate the effect of the part of
13308 std_gimplify_va_arg_expr that carries out the align adjust, for the
13309 relevant case.
13310 We don't need to check for pass-by-reference because of the test above.
13311 We can return a simplified answer, since we know there's no offset to add. */
13313 if (((TARGET_MACHO
13314 && rs6000_darwin64_abi)
13315 || DEFAULT_ABI == ABI_ELFv2
13316 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13317 && integer_zerop (TYPE_SIZE (type)))
13319 unsigned HOST_WIDE_INT align, boundary;
13320 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13321 align = PARM_BOUNDARY / BITS_PER_UNIT;
13322 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13323 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13324 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13325 boundary /= BITS_PER_UNIT;
13326 if (boundary > align)
13328 tree t;
13329 /* This updates arg ptr by the amount that would be necessary
13330 to align the zero-sized (but not zero-alignment) item. */
13331 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13332 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13333 gimplify_and_add (t, pre_p);
13335 t = fold_convert (sizetype, valist_tmp);
13336 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13337 fold_convert (TREE_TYPE (valist),
13338 fold_build2 (BIT_AND_EXPR, sizetype, t,
13339 size_int (-boundary))));
13340 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13341 gimplify_and_add (t, pre_p);
13343 /* Since it is zero-sized there's no increment for the item itself. */
13344 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13345 return build_va_arg_indirect_ref (valist_tmp);
13348 if (DEFAULT_ABI != ABI_V4)
13350 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13352 tree elem_type = TREE_TYPE (type);
13353 machine_mode elem_mode = TYPE_MODE (elem_type);
13354 int elem_size = GET_MODE_SIZE (elem_mode);
13356 if (elem_size < UNITS_PER_WORD)
13358 tree real_part, imag_part;
13359 gimple_seq post = NULL;
13361 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13362 &post);
13363 /* Copy the value into a temporary, lest the formal temporary
13364 be reused out from under us. */
13365 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13366 gimple_seq_add_seq (pre_p, post);
13368 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13369 post_p);
13371 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13375 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13378 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13379 f_fpr = DECL_CHAIN (f_gpr);
13380 f_res = DECL_CHAIN (f_fpr);
13381 f_ovf = DECL_CHAIN (f_res);
13382 f_sav = DECL_CHAIN (f_ovf);
13384 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13385 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13386 f_fpr, NULL_TREE);
13387 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13388 f_ovf, NULL_TREE);
13389 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13390 f_sav, NULL_TREE);
13392 size = int_size_in_bytes (type);
13393 rsize = (size + 3) / 4;
13394 int pad = 4 * rsize - size;
13395 align = 1;
13397 machine_mode mode = TYPE_MODE (type);
13398 if (abi_v4_pass_in_fpr (mode))
13400 /* FP args go in FP registers, if present. */
13401 reg = fpr;
13402 n_reg = (size + 7) / 8;
13403 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13404 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13405 if (mode != SFmode && mode != SDmode)
13406 align = 8;
13408 else
13410 /* Otherwise into GP registers. */
13411 reg = gpr;
13412 n_reg = rsize;
13413 sav_ofs = 0;
13414 sav_scale = 4;
13415 if (n_reg == 2)
13416 align = 8;
13419 /* Pull the value out of the saved registers.... */
13421 lab_over = NULL;
13422 addr = create_tmp_var (ptr_type_node, "addr");
13424 /* AltiVec vectors never go in registers when -mabi=altivec. */
13425 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13426 align = 16;
13427 else
13429 lab_false = create_artificial_label (input_location);
13430 lab_over = create_artificial_label (input_location);
13432 /* Long long is aligned in the registers, as is any other two-GPR
13433 item such as complex int, due to a historical mistake. */
13434 u = reg;
13435 if (n_reg == 2 && reg == gpr)
13437 regalign = 1;
13438 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13439 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13440 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13441 unshare_expr (reg), u);
13443 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13444 reg number is 0 for f1, so we want to make it odd. */
13445 else if (reg == fpr && mode == TDmode)
13447 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13448 build_int_cst (TREE_TYPE (reg), 1));
13449 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13452 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13453 t = build2 (GE_EXPR, boolean_type_node, u, t);
13454 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13455 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13456 gimplify_and_add (t, pre_p);
13458 t = sav;
13459 if (sav_ofs)
13460 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13462 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13463 build_int_cst (TREE_TYPE (reg), n_reg));
13464 u = fold_convert (sizetype, u);
13465 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13466 t = fold_build_pointer_plus (t, u);
13468 /* _Decimal32 varargs are located in the second word of the 64-bit
13469 FP register for 32-bit binaries. */
13470 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13471 t = fold_build_pointer_plus_hwi (t, size);
13473 /* Args are passed right-aligned. */
13474 if (BYTES_BIG_ENDIAN)
13475 t = fold_build_pointer_plus_hwi (t, pad);
13477 gimplify_assign (addr, t, pre_p);
13479 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13481 stmt = gimple_build_label (lab_false);
13482 gimple_seq_add_stmt (pre_p, stmt);
13484 if ((n_reg == 2 && !regalign) || n_reg > 2)
13486 /* Ensure that we don't find any more args in regs.
13487 Alignment has taken care of the special cases. */
13488 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13492 /* ... otherwise out of the overflow area. */
13494 /* Care for on-stack alignment if needed. */
13495 t = ovf;
13496 if (align != 1)
13498 t = fold_build_pointer_plus_hwi (t, align - 1);
13499 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13500 build_int_cst (TREE_TYPE (t), -align));
13503 /* Args are passed right-aligned. */
13504 if (BYTES_BIG_ENDIAN)
13505 t = fold_build_pointer_plus_hwi (t, pad);
13507 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13509 gimplify_assign (unshare_expr (addr), t, pre_p);
13511 t = fold_build_pointer_plus_hwi (t, size);
13512 gimplify_assign (unshare_expr (ovf), t, pre_p);
13514 if (lab_over)
13516 stmt = gimple_build_label (lab_over);
13517 gimple_seq_add_stmt (pre_p, stmt);
13520 if (STRICT_ALIGNMENT
13521 && (TYPE_ALIGN (type)
13522 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13524 /* The value (of type complex double, for example) may not be
13525 aligned in memory in the saved registers, so copy via a
13526 temporary. (This is the same code as used for SPARC.) */
13527 tree tmp = create_tmp_var (type, "va_arg_tmp");
13528 tree dest_addr = build_fold_addr_expr (tmp);
13530 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13531 3, dest_addr, addr, size_int (rsize * 4));
13533 gimplify_and_add (copy, pre_p);
13534 addr = dest_addr;
13537 addr = fold_convert (ptrtype, addr);
13538 return build_va_arg_indirect_ref (addr);
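/* Illustrative layout sketch, not part of GCC: with hard double-precision
   float, the V.4 register save area consulted above looks like

     reg_save_area +  0 : r3..r10  (8 GPRs x 4 bytes; sav_ofs 0,  sav_scale 4)
     reg_save_area + 32 : f1..f8   (8 FPRs x 8 bytes; sav_ofs 32, sav_scale 8)

   so va_arg of a double with fpr == 2 loads from reg_save_area + 32 + 2*8,
   and falls through to overflow_arg_area once fpr reaches 8.  */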
13541 /* Builtins. */
13543 static void
13544 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13546 tree t;
13547 unsigned classify = rs6000_builtin_info[(int)code].attr;
13548 const char *attr_string = "";
13550 gcc_assert (name != NULL);
13551 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13553 if (rs6000_builtin_decls[(int)code])
13554 fatal_error (input_location,
13555 "internal error: builtin function %qs already processed",
13556 name);
13558 rs6000_builtin_decls[(int)code] = t =
13559 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13561 /* Set any special attributes. */
13562 if ((classify & RS6000_BTC_CONST) != 0)
13564 /* const function, function only depends on the inputs. */
13565 TREE_READONLY (t) = 1;
13566 TREE_NOTHROW (t) = 1;
13567 attr_string = ", const";
13569 else if ((classify & RS6000_BTC_PURE) != 0)
13571 /* pure function, function can read global memory, but does not set any
13572 external state. */
13573 DECL_PURE_P (t) = 1;
13574 TREE_NOTHROW (t) = 1;
13575 attr_string = ", pure";
13577 else if ((classify & RS6000_BTC_FP) != 0)
13579 /* Function is a math function. If rounding mode is on, then treat the
13580 function as not reading global memory, but it can have arbitrary side
13581 effects. If it is off, then assume the function is a const function.
13582 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13583 builtin-attrs.def that is used for the math functions. */
13584 TREE_NOTHROW (t) = 1;
13585 if (flag_rounding_math)
13587 DECL_PURE_P (t) = 1;
13588 DECL_IS_NOVOPS (t) = 1;
13589 attr_string = ", fp, pure";
13591 else
13593 TREE_READONLY (t) = 1;
13594 attr_string = ", fp, const";
13597 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13598 gcc_unreachable ();
13600 if (TARGET_DEBUG_BUILTIN)
13601 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13602 (int)code, name, attr_string);
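/* Illustrative sketch, not part of GCC's actual registration sequence (the
   type-node name is representative of those built in this file): the init
   routines call def_builtin like

     def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si,
                  ALTIVEC_BUILTIN_MTVSCR);

   which records the decl in rs6000_builtin_decls and applies the const/pure
   attribute bits tested above.  */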
13605 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13607 #undef RS6000_BUILTIN_0
13608 #undef RS6000_BUILTIN_1
13609 #undef RS6000_BUILTIN_2
13610 #undef RS6000_BUILTIN_3
13611 #undef RS6000_BUILTIN_A
13612 #undef RS6000_BUILTIN_D
13613 #undef RS6000_BUILTIN_H
13614 #undef RS6000_BUILTIN_P
13615 #undef RS6000_BUILTIN_Q
13616 #undef RS6000_BUILTIN_X
13618 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13619 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13620 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13621 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13622 { MASK, ICODE, NAME, ENUM },
13624 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13625 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13626 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13627 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13628 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13629 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13631 static const struct builtin_description bdesc_3arg[] =
13633 #include "rs6000-builtin.def"
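/* The tables in this block are built with the X-macro technique: every
   entry in rs6000-builtin.def is written as RS6000_BUILTIN_<kind> (...),
   and each table re-#defines exactly one kind to expand to an initializer
   while the rest expand to nothing, so the same .def file can populate each
   table in turn.  A generic sketch of the pattern, with hypothetical names:

     // items.def contains lines such as: ITEM ("add", 1)
     #define ITEM(NAME, VALUE) { NAME, VALUE },
     static const struct entry table[] = {
     #include "items.def"
     };
     #undef ITEM
*/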
13636 /* DST operations: void foo (void *, const int, const char). */
13638 #undef RS6000_BUILTIN_0
13639 #undef RS6000_BUILTIN_1
13640 #undef RS6000_BUILTIN_2
13641 #undef RS6000_BUILTIN_3
13642 #undef RS6000_BUILTIN_A
13643 #undef RS6000_BUILTIN_D
13644 #undef RS6000_BUILTIN_H
13645 #undef RS6000_BUILTIN_P
13646 #undef RS6000_BUILTIN_Q
13647 #undef RS6000_BUILTIN_X
13649 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13650 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13651 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13652 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13653 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13654 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13655 { MASK, ICODE, NAME, ENUM },
13657 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13658 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13659 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13660 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13662 static const struct builtin_description bdesc_dst[] =
13664 #include "rs6000-builtin.def"
13667 /* Simple binary operations: VECc = foo (VECa, VECb). */
13669 #undef RS6000_BUILTIN_0
13670 #undef RS6000_BUILTIN_1
13671 #undef RS6000_BUILTIN_2
13672 #undef RS6000_BUILTIN_3
13673 #undef RS6000_BUILTIN_A
13674 #undef RS6000_BUILTIN_D
13675 #undef RS6000_BUILTIN_H
13676 #undef RS6000_BUILTIN_P
13677 #undef RS6000_BUILTIN_Q
13678 #undef RS6000_BUILTIN_X
13680 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13681 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13682 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13683 { MASK, ICODE, NAME, ENUM },
13685 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13686 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13687 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13688 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13689 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13690 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13691 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13693 static const struct builtin_description bdesc_2arg[] =
13695 #include "rs6000-builtin.def"
13698 #undef RS6000_BUILTIN_0
13699 #undef RS6000_BUILTIN_1
13700 #undef RS6000_BUILTIN_2
13701 #undef RS6000_BUILTIN_3
13702 #undef RS6000_BUILTIN_A
13703 #undef RS6000_BUILTIN_D
13704 #undef RS6000_BUILTIN_H
13705 #undef RS6000_BUILTIN_P
13706 #undef RS6000_BUILTIN_Q
13707 #undef RS6000_BUILTIN_X
13709 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13710 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13711 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13712 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13713 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13714 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13715 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13716 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13717 { MASK, ICODE, NAME, ENUM },
13719 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13720 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13722 /* AltiVec predicates. */
13724 static const struct builtin_description bdesc_altivec_preds[] =
13726 #include "rs6000-builtin.def"
13729 /* PAIRED predicates. */
13730 #undef RS6000_BUILTIN_0
13731 #undef RS6000_BUILTIN_1
13732 #undef RS6000_BUILTIN_2
13733 #undef RS6000_BUILTIN_3
13734 #undef RS6000_BUILTIN_A
13735 #undef RS6000_BUILTIN_D
13736 #undef RS6000_BUILTIN_H
13737 #undef RS6000_BUILTIN_P
13738 #undef RS6000_BUILTIN_Q
13739 #undef RS6000_BUILTIN_X
13741 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13742 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13743 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13744 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13745 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13746 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13747 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13748 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13749 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13750 { MASK, ICODE, NAME, ENUM },
13752 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13754 static const struct builtin_description bdesc_paired_preds[] =
13756 #include "rs6000-builtin.def"
13759 /* ABS* operations. */
13761 #undef RS6000_BUILTIN_0
13762 #undef RS6000_BUILTIN_1
13763 #undef RS6000_BUILTIN_2
13764 #undef RS6000_BUILTIN_3
13765 #undef RS6000_BUILTIN_A
13766 #undef RS6000_BUILTIN_D
13767 #undef RS6000_BUILTIN_H
13768 #undef RS6000_BUILTIN_P
13769 #undef RS6000_BUILTIN_Q
13770 #undef RS6000_BUILTIN_X
13772 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13773 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13774 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13775 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13776 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13777 { MASK, ICODE, NAME, ENUM },
13779 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13780 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13781 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13782 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13783 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13785 static const struct builtin_description bdesc_abs[] =
13787 #include "rs6000-builtin.def"
13790 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13791 foo (VECa). */
13793 #undef RS6000_BUILTIN_0
13794 #undef RS6000_BUILTIN_1
13795 #undef RS6000_BUILTIN_2
13796 #undef RS6000_BUILTIN_3
13797 #undef RS6000_BUILTIN_A
13798 #undef RS6000_BUILTIN_D
13799 #undef RS6000_BUILTIN_H
13800 #undef RS6000_BUILTIN_P
13801 #undef RS6000_BUILTIN_Q
13802 #undef RS6000_BUILTIN_X
13804 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13805 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13806 { MASK, ICODE, NAME, ENUM },
13808 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13809 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13810 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13811 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13812 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13813 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13814 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13815 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13817 static const struct builtin_description bdesc_1arg[] =
13819 #include "rs6000-builtin.def"
13822 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13824 #undef RS6000_BUILTIN_0
13825 #undef RS6000_BUILTIN_1
13826 #undef RS6000_BUILTIN_2
13827 #undef RS6000_BUILTIN_3
13828 #undef RS6000_BUILTIN_A
13829 #undef RS6000_BUILTIN_D
13830 #undef RS6000_BUILTIN_H
13831 #undef RS6000_BUILTIN_P
13832 #undef RS6000_BUILTIN_Q
13833 #undef RS6000_BUILTIN_X
13835 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13836 { MASK, ICODE, NAME, ENUM },
13838 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13839 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13840 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13841 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13842 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13843 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13844 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13845 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13846 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13848 static const struct builtin_description bdesc_0arg[] =
13850 #include "rs6000-builtin.def"
13853 /* HTM builtins. */
13854 #undef RS6000_BUILTIN_0
13855 #undef RS6000_BUILTIN_1
13856 #undef RS6000_BUILTIN_2
13857 #undef RS6000_BUILTIN_3
13858 #undef RS6000_BUILTIN_A
13859 #undef RS6000_BUILTIN_D
13860 #undef RS6000_BUILTIN_H
13861 #undef RS6000_BUILTIN_P
13862 #undef RS6000_BUILTIN_Q
13863 #undef RS6000_BUILTIN_X
13865 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13866 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13867 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13868 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13869 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13870 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13871 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13872 { MASK, ICODE, NAME, ENUM },
13874 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13875 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13876 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13878 static const struct builtin_description bdesc_htm[] =
13880 #include "rs6000-builtin.def"
13883 #undef RS6000_BUILTIN_0
13884 #undef RS6000_BUILTIN_1
13885 #undef RS6000_BUILTIN_2
13886 #undef RS6000_BUILTIN_3
13887 #undef RS6000_BUILTIN_A
13888 #undef RS6000_BUILTIN_D
13889 #undef RS6000_BUILTIN_H
13890 #undef RS6000_BUILTIN_P
13891 #undef RS6000_BUILTIN_Q
13893 /* Return true if a builtin function is overloaded. */
13894 bool
13895 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13897 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13900 const char *
13901 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13903 return rs6000_builtin_info[(int)fncode].name;
13906 /* Expand an expression EXP that calls a builtin without arguments. */
13907 static rtx
13908 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13910 rtx pat;
13911 machine_mode tmode = insn_data[icode].operand[0].mode;
13913 if (icode == CODE_FOR_nothing)
13914 /* Builtin not supported on this processor. */
13915 return 0;
13917 if (target == 0
13918 || GET_MODE (target) != tmode
13919 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13920 target = gen_reg_rtx (tmode);
13922 pat = GEN_FCN (icode) (target);
13923 if (! pat)
13924 return 0;
13925 emit_insn (pat);
13927 return target;
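/* Illustrative sketch, not part of GCC: a zero-operand builtin such as

     unsigned int r = __builtin_darn_32 ();   // POWER9 random number

   is expanded by the routine above; on an older -mcpu the icode will be
   CODE_FOR_nothing and the routine bails out by returning 0.  */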
13931 static rtx
13932 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13934 rtx pat;
13935 tree arg0 = CALL_EXPR_ARG (exp, 0);
13936 tree arg1 = CALL_EXPR_ARG (exp, 1);
13937 rtx op0 = expand_normal (arg0);
13938 rtx op1 = expand_normal (arg1);
13939 machine_mode mode0 = insn_data[icode].operand[0].mode;
13940 machine_mode mode1 = insn_data[icode].operand[1].mode;
13942 if (icode == CODE_FOR_nothing)
13943 /* Builtin not supported on this processor. */
13944 return 0;
13946 /* If we got invalid arguments bail out before generating bad rtl. */
13947 if (arg0 == error_mark_node || arg1 == error_mark_node)
13948 return const0_rtx;
13950 if (GET_CODE (op0) != CONST_INT
13951 || INTVAL (op0) > 255
13952 || INTVAL (op0) < 0)
13954 error ("argument 1 must be an 8-bit field value");
13955 return const0_rtx;
13958 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13959 op0 = copy_to_mode_reg (mode0, op0);
13961 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13962 op1 = copy_to_mode_reg (mode1, op1);
13964 pat = GEN_FCN (icode) (op0, op1);
13965 if (! pat)
13966 return const0_rtx;
13967 emit_insn (pat);
13969 return NULL_RTX;
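/* Illustrative sketch, not part of GCC: this expander backs calls such as

     __builtin_mtfsf (0xff, d);   // copy all eight FPSCR fields from D

   The CONST_INT test above is why the field mask must be a compile-time
   literal in the range 0..255; __builtin_mtfsf (mask_var, d) is rejected
   with the error message seen above.  */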
13972 static rtx
13973 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13975 rtx pat;
13976 tree arg0 = CALL_EXPR_ARG (exp, 0);
13977 rtx op0 = expand_normal (arg0);
13978 machine_mode tmode = insn_data[icode].operand[0].mode;
13979 machine_mode mode0 = insn_data[icode].operand[1].mode;
13981 if (icode == CODE_FOR_nothing)
13982 /* Builtin not supported on this processor. */
13983 return 0;
13985 /* If we got invalid arguments bail out before generating bad rtl. */
13986 if (arg0 == error_mark_node)
13987 return const0_rtx;
13989 if (icode == CODE_FOR_altivec_vspltisb
13990 || icode == CODE_FOR_altivec_vspltish
13991 || icode == CODE_FOR_altivec_vspltisw)
13993 /* Only allow 5-bit *signed* literals. */
13994 if (GET_CODE (op0) != CONST_INT
13995 || INTVAL (op0) > 15
13996 || INTVAL (op0) < -16)
13998 error ("argument 1 must be a 5-bit signed literal");
13999 return CONST0_RTX (tmode);
14003 if (target == 0
14004 || GET_MODE (target) != tmode
14005 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14006 target = gen_reg_rtx (tmode);
14008 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14009 op0 = copy_to_mode_reg (mode0, op0);
14011 pat = GEN_FCN (icode) (target, op0);
14012 if (! pat)
14013 return 0;
14014 emit_insn (pat);
14016 return target;
14019 static rtx
14020 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14022 rtx pat, scratch1, scratch2;
14023 tree arg0 = CALL_EXPR_ARG (exp, 0);
14024 rtx op0 = expand_normal (arg0);
14025 machine_mode tmode = insn_data[icode].operand[0].mode;
14026 machine_mode mode0 = insn_data[icode].operand[1].mode;
14028 /* If we have invalid arguments, bail out before generating bad rtl. */
14029 if (arg0 == error_mark_node)
14030 return const0_rtx;
14032 if (target == 0
14033 || GET_MODE (target) != tmode
14034 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14035 target = gen_reg_rtx (tmode);
14037 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14038 op0 = copy_to_mode_reg (mode0, op0);
14040 scratch1 = gen_reg_rtx (mode0);
14041 scratch2 = gen_reg_rtx (mode0);
14043 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14044 if (! pat)
14045 return 0;
14046 emit_insn (pat);
14048 return target;
14051 static rtx
14052 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14054 rtx pat;
14055 tree arg0 = CALL_EXPR_ARG (exp, 0);
14056 tree arg1 = CALL_EXPR_ARG (exp, 1);
14057 rtx op0 = expand_normal (arg0);
14058 rtx op1 = expand_normal (arg1);
14059 machine_mode tmode = insn_data[icode].operand[0].mode;
14060 machine_mode mode0 = insn_data[icode].operand[1].mode;
14061 machine_mode mode1 = insn_data[icode].operand[2].mode;
14063 if (icode == CODE_FOR_nothing)
14064 /* Builtin not supported on this processor. */
14065 return 0;
14067 /* If we got invalid arguments bail out before generating bad rtl. */
14068 if (arg0 == error_mark_node || arg1 == error_mark_node)
14069 return const0_rtx;
14071 if (icode == CODE_FOR_altivec_vcfux
14072 || icode == CODE_FOR_altivec_vcfsx
14073 || icode == CODE_FOR_altivec_vctsxs
14074 || icode == CODE_FOR_altivec_vctuxs
14075 || icode == CODE_FOR_altivec_vspltb
14076 || icode == CODE_FOR_altivec_vsplth
14077 || icode == CODE_FOR_altivec_vspltw)
14079 /* Only allow 5-bit unsigned literals. */
14080 STRIP_NOPS (arg1);
14081 if (TREE_CODE (arg1) != INTEGER_CST
14082 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14084 error ("argument 2 must be a 5-bit unsigned literal");
14085 return CONST0_RTX (tmode);
14088 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14089 || icode == CODE_FOR_dfptstsfi_lt_dd
14090 || icode == CODE_FOR_dfptstsfi_gt_dd
14091 || icode == CODE_FOR_dfptstsfi_unordered_dd
14092 || icode == CODE_FOR_dfptstsfi_eq_td
14093 || icode == CODE_FOR_dfptstsfi_lt_td
14094 || icode == CODE_FOR_dfptstsfi_gt_td
14095 || icode == CODE_FOR_dfptstsfi_unordered_td)
14097 /* Only allow 6-bit unsigned literals. */
14098 STRIP_NOPS (arg0);
14099 if (TREE_CODE (arg0) != INTEGER_CST
14100 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14102 error ("argument 1 must be a 6-bit unsigned literal");
14103 return CONST0_RTX (tmode);
14106 else if (icode == CODE_FOR_xststdcqp_kf
14107 || icode == CODE_FOR_xststdcqp_tf
14108 || icode == CODE_FOR_xststdcdp
14109 || icode == CODE_FOR_xststdcsp
14110 || icode == CODE_FOR_xvtstdcdp
14111 || icode == CODE_FOR_xvtstdcsp)
14113 /* Only allow 7-bit unsigned literals. */
14114 STRIP_NOPS (arg1);
14115 if (TREE_CODE (arg1) != INTEGER_CST
14116 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14118 error ("argument 2 must be a 7-bit unsigned literal");
14119 return CONST0_RTX (tmode);
14122 else if (icode == CODE_FOR_unpackv1ti
14123 || icode == CODE_FOR_unpackkf
14124 || icode == CODE_FOR_unpacktf
14125 || icode == CODE_FOR_unpackif
14126 || icode == CODE_FOR_unpacktd)
14128 /* Only allow 1-bit unsigned literals. */
14129 STRIP_NOPS (arg1);
14130 if (TREE_CODE (arg1) != INTEGER_CST
14131 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14133 error ("argument 2 must be a 1-bit unsigned literal");
14134 return CONST0_RTX (tmode);
14138 if (target == 0
14139 || GET_MODE (target) != tmode
14140 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14141 target = gen_reg_rtx (tmode);
14143 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14144 op0 = copy_to_mode_reg (mode0, op0);
14145 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14146 op1 = copy_to_mode_reg (mode1, op1);
14148 pat = GEN_FCN (icode) (target, op0, op1);
14149 if (! pat)
14150 return 0;
14151 emit_insn (pat);
14153 return target;
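/* Illustrative sketch, not part of GCC: the literal checks above are what
   reject, for example,

     __builtin_altivec_vspltw (v, n);   // error unless N folds to 0..31

   while __builtin_altivec_vspltw (v, 3) expands normally; the 1-, 6- and
   7-bit immediates of the other instruction groups are policed the same
   way.  */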
14156 static rtx
14157 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14159 rtx pat, scratch;
14160 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14161 tree arg0 = CALL_EXPR_ARG (exp, 1);
14162 tree arg1 = CALL_EXPR_ARG (exp, 2);
14163 rtx op0 = expand_normal (arg0);
14164 rtx op1 = expand_normal (arg1);
14165 machine_mode tmode = SImode;
14166 machine_mode mode0 = insn_data[icode].operand[1].mode;
14167 machine_mode mode1 = insn_data[icode].operand[2].mode;
14168 int cr6_form_int;
14170 if (TREE_CODE (cr6_form) != INTEGER_CST)
14172 error ("argument 1 of %qs must be a constant",
14173 "__builtin_altivec_predicate");
14174 return const0_rtx;
14176 else
14177 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14179 gcc_assert (mode0 == mode1);
14181 /* If we have invalid arguments, bail out before generating bad rtl. */
14182 if (arg0 == error_mark_node || arg1 == error_mark_node)
14183 return const0_rtx;
14185 if (target == 0
14186 || GET_MODE (target) != tmode
14187 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14188 target = gen_reg_rtx (tmode);
14190 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14191 op0 = copy_to_mode_reg (mode0, op0);
14192 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14193 op1 = copy_to_mode_reg (mode1, op1);
14195 /* Note that for many of the relevant operations (e.g. cmpne or
14196 cmpeq) with float or double operands, it makes more sense for the
14197 mode of the allocated scratch register to select a vector of
14198 integer. But the choice to copy the mode of operand 0 was made
14199 long ago and there are no plans to change it. */
14200 scratch = gen_reg_rtx (mode0);
14202 pat = GEN_FCN (icode) (scratch, op0, op1);
14203 if (! pat)
14204 return 0;
14205 emit_insn (pat);
14207 /* The vec_any* and vec_all* predicates use the same opcodes for two
14208 different operations, but the bits in CR6 will be different
14209 depending on what information we want. So we have to play tricks
14210 with CR6 to get the right bits out.
14212 If you think this is disgusting, look at the specs for the
14213 AltiVec predicates. */
14215 switch (cr6_form_int)
14217 case 0:
14218 emit_insn (gen_cr6_test_for_zero (target));
14219 break;
14220 case 1:
14221 emit_insn (gen_cr6_test_for_zero_reverse (target));
14222 break;
14223 case 2:
14224 emit_insn (gen_cr6_test_for_lt (target));
14225 break;
14226 case 3:
14227 emit_insn (gen_cr6_test_for_lt_reverse (target));
14228 break;
14229 default:
14230 error ("argument 1 of %qs is out of range",
14231 "__builtin_altivec_predicate");
14232 break;
14235 return target;
14238 static rtx
14239 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14241 rtx pat, addr;
14242 tree arg0 = CALL_EXPR_ARG (exp, 0);
14243 tree arg1 = CALL_EXPR_ARG (exp, 1);
14244 machine_mode tmode = insn_data[icode].operand[0].mode;
14245 machine_mode mode0 = Pmode;
14246 machine_mode mode1 = Pmode;
14247 rtx op0 = expand_normal (arg0);
14248 rtx op1 = expand_normal (arg1);
14250 if (icode == CODE_FOR_nothing)
14251 /* Builtin not supported on this processor. */
14252 return 0;
14254 /* If we got invalid arguments bail out before generating bad rtl. */
14255 if (arg0 == error_mark_node || arg1 == error_mark_node)
14256 return const0_rtx;
14258 if (target == 0
14259 || GET_MODE (target) != tmode
14260 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14261 target = gen_reg_rtx (tmode);
14263 op1 = copy_to_mode_reg (mode1, op1);
14265 if (op0 == const0_rtx)
14267 addr = gen_rtx_MEM (tmode, op1);
14269 else
14271 op0 = copy_to_mode_reg (mode0, op0);
14272 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14275 pat = GEN_FCN (icode) (target, addr);
14277 if (! pat)
14278 return 0;
14279 emit_insn (pat);
14281 return target;
14284 /* Return a constant vector for use as a little-endian permute control vector
14285 to reverse the order of elements of the given vector mode. */
14286 static rtx
14287 swap_selector_for_mode (machine_mode mode)
14289 /* These are little endian vectors, so their elements are reversed
14290 from what you would normally expect for a permute control vector. */
14291 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14292 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14293 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14294 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14295 unsigned int *swaparray, i;
14296 rtx perm[16];
14298 switch (mode)
14300 case E_V2DFmode:
14301 case E_V2DImode:
14302 swaparray = swap2;
14303 break;
14304 case E_V4SFmode:
14305 case E_V4SImode:
14306 swaparray = swap4;
14307 break;
14308 case E_V8HImode:
14309 swaparray = swap8;
14310 break;
14311 case E_V16QImode:
14312 swaparray = swap16;
14313 break;
14314 default:
14315 gcc_unreachable ();
14318 for (i = 0; i < 16; ++i)
14319 perm[i] = GEN_INT (swaparray[i]);
14321 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
14324 rtx
14325 swap_endian_selector_for_mode (machine_mode mode)
14327 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
14328 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14329 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14330 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14332 unsigned int *swaparray, i;
14333 rtx perm[16];
14335 switch (mode)
14337 case E_V1TImode:
14338 swaparray = swap1;
14339 break;
14340 case E_V2DFmode:
14341 case E_V2DImode:
14342 swaparray = swap2;
14343 break;
14344 case E_V4SFmode:
14345 case E_V4SImode:
14346 swaparray = swap4;
14347 break;
14348 case E_V8HImode:
14349 swaparray = swap8;
14350 break;
14351 default:
14352 gcc_unreachable ();
14355 for (i = 0; i < 16; ++i)
14356 perm[i] = GEN_INT (swaparray[i]);
14358 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
14359 gen_rtvec_v (16, perm)));
14362 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14363 with -maltivec=be specified. Issue the load followed by an element-
14364 reversing permute. */
14365 void
14366 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14368 rtx tmp = gen_reg_rtx (mode);
14369 rtx load = gen_rtx_SET (tmp, op1);
14370 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14371 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14372 rtx sel = swap_selector_for_mode (mode);
14373 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14375 gcc_assert (REG_P (op0));
14376 emit_insn (par);
14377 emit_insn (gen_rtx_SET (op0, vperm));
14380 /* Generate code for a "stvxl" built-in for a little endian target with
14381 -maltivec=be specified. Issue the store preceded by an element-reversing
14382 permute. */
14383 void
14384 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14386 rtx tmp = gen_reg_rtx (mode);
14387 rtx store = gen_rtx_SET (op0, tmp);
14388 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14389 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14390 rtx sel = swap_selector_for_mode (mode);
14391 rtx vperm;
14393 gcc_assert (REG_P (op1));
14394 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14395 emit_insn (gen_rtx_SET (tmp, vperm));
14396 emit_insn (par);
14399 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14400 specified. Issue the store preceded by an element-reversing permute. */
14401 void
14402 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14404 machine_mode inner_mode = GET_MODE_INNER (mode);
14405 rtx tmp = gen_reg_rtx (mode);
14406 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14407 rtx sel = swap_selector_for_mode (mode);
14408 rtx vperm;
14410 gcc_assert (REG_P (op1));
14411 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14412 emit_insn (gen_rtx_SET (tmp, vperm));
14413 emit_insn (gen_rtx_SET (op0, stvx));
14416 static rtx
14417 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14419 rtx pat, addr;
14420 tree arg0 = CALL_EXPR_ARG (exp, 0);
14421 tree arg1 = CALL_EXPR_ARG (exp, 1);
14422 machine_mode tmode = insn_data[icode].operand[0].mode;
14423 machine_mode mode0 = Pmode;
14424 machine_mode mode1 = Pmode;
14425 rtx op0 = expand_normal (arg0);
14426 rtx op1 = expand_normal (arg1);
14428 if (icode == CODE_FOR_nothing)
14429 /* Builtin not supported on this processor. */
14430 return 0;
14432 /* If we got invalid arguments bail out before generating bad rtl. */
14433 if (arg0 == error_mark_node || arg1 == error_mark_node)
14434 return const0_rtx;
14436 if (target == 0
14437 || GET_MODE (target) != tmode
14438 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14439 target = gen_reg_rtx (tmode);
14441 op1 = copy_to_mode_reg (mode1, op1);
14443 /* For LVX, express the RTL accurately by ANDing the address with -16.
14444 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14445 so the raw address is fine. */
14446 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14447 || icode == CODE_FOR_altivec_lvx_v2di_2op
14448 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14449 || icode == CODE_FOR_altivec_lvx_v4si_2op
14450 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14451 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14453 rtx rawaddr;
14454 if (op0 == const0_rtx)
14455 rawaddr = op1;
14456 else
14458 op0 = copy_to_mode_reg (mode0, op0);
14459 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14461 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14462 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14464 /* For -maltivec=be, emit the load and follow it up with a
14465 permute to swap the elements. */
14466 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14468 rtx temp = gen_reg_rtx (tmode);
14469 emit_insn (gen_rtx_SET (temp, addr));
14471 rtx sel = swap_selector_for_mode (tmode);
14472 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14473 UNSPEC_VPERM);
14474 emit_insn (gen_rtx_SET (target, vperm));
14476 else
14477 emit_insn (gen_rtx_SET (target, addr));
14479 else
14481 if (op0 == const0_rtx)
14482 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14483 else
14485 op0 = copy_to_mode_reg (mode0, op0);
14486 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14487 gen_rtx_PLUS (Pmode, op1, op0));
14490 pat = GEN_FCN (icode) (target, addr);
14491 if (! pat)
14492 return 0;
14493 emit_insn (pat);
14496 return target;
14499 static rtx
14500 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14502 tree arg0 = CALL_EXPR_ARG (exp, 0);
14503 tree arg1 = CALL_EXPR_ARG (exp, 1);
14504 tree arg2 = CALL_EXPR_ARG (exp, 2);
14505 rtx op0 = expand_normal (arg0);
14506 rtx op1 = expand_normal (arg1);
14507 rtx op2 = expand_normal (arg2);
14508 rtx pat, addr;
14509 machine_mode tmode = insn_data[icode].operand[0].mode;
14510 machine_mode mode1 = Pmode;
14511 machine_mode mode2 = Pmode;
14513 /* Invalid arguments. Bail out before doing anything stupid! */
14514 if (arg0 == error_mark_node
14515 || arg1 == error_mark_node
14516 || arg2 == error_mark_node)
14517 return const0_rtx;
14519 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14520 op0 = copy_to_mode_reg (tmode, op0);
14522 op2 = copy_to_mode_reg (mode2, op2);
14524 if (op1 == const0_rtx)
14526 addr = gen_rtx_MEM (tmode, op2);
14528 else
14530 op1 = copy_to_mode_reg (mode1, op1);
14531 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14534 pat = GEN_FCN (icode) (addr, op0);
14535 if (pat)
14536 emit_insn (pat);
14537 return NULL_RTX;
14540 static rtx
14541 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14543 rtx pat;
14544 tree arg0 = CALL_EXPR_ARG (exp, 0);
14545 tree arg1 = CALL_EXPR_ARG (exp, 1);
14546 tree arg2 = CALL_EXPR_ARG (exp, 2);
14547 rtx op0 = expand_normal (arg0);
14548 rtx op1 = expand_normal (arg1);
14549 rtx op2 = expand_normal (arg2);
14550 machine_mode mode0 = insn_data[icode].operand[0].mode;
14551 machine_mode mode1 = insn_data[icode].operand[1].mode;
14552 machine_mode mode2 = insn_data[icode].operand[2].mode;
14554 if (icode == CODE_FOR_nothing)
14555 /* Builtin not supported on this processor. */
14556 return NULL_RTX;
14558 /* If we got invalid arguments bail out before generating bad rtl. */
14559 if (arg0 == error_mark_node
14560 || arg1 == error_mark_node
14561 || arg2 == error_mark_node)
14562 return NULL_RTX;
14564 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14565 op0 = copy_to_mode_reg (mode0, op0);
14566 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14567 op1 = copy_to_mode_reg (mode1, op1);
14568 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14569 op2 = copy_to_mode_reg (mode2, op2);
14571 pat = GEN_FCN (icode) (op0, op1, op2);
14572 if (pat)
14573 emit_insn (pat);
14575 return NULL_RTX;
14578 static rtx
14579 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14581 tree arg0 = CALL_EXPR_ARG (exp, 0);
14582 tree arg1 = CALL_EXPR_ARG (exp, 1);
14583 tree arg2 = CALL_EXPR_ARG (exp, 2);
14584 rtx op0 = expand_normal (arg0);
14585 rtx op1 = expand_normal (arg1);
14586 rtx op2 = expand_normal (arg2);
14587 rtx pat, addr, rawaddr;
14588 machine_mode tmode = insn_data[icode].operand[0].mode;
14589 machine_mode smode = insn_data[icode].operand[1].mode;
14590 machine_mode mode1 = Pmode;
14591 machine_mode mode2 = Pmode;
14593 /* Invalid arguments. Bail out before doing anything stupid! */
14594 if (arg0 == error_mark_node
14595 || arg1 == error_mark_node
14596 || arg2 == error_mark_node)
14597 return const0_rtx;
14599 op2 = copy_to_mode_reg (mode2, op2);
14601 /* For STVX, express the RTL accurately by ANDing the address with -16.
14602 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14603 so the raw address is fine. */
14604 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14605 || icode == CODE_FOR_altivec_stvx_v2di_2op
14606 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14607 || icode == CODE_FOR_altivec_stvx_v4si_2op
14608 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14609 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14611 if (op1 == const0_rtx)
14612 rawaddr = op2;
14613 else
14615 op1 = copy_to_mode_reg (mode1, op1);
14616 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14619 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14620 addr = gen_rtx_MEM (tmode, addr);
14622 op0 = copy_to_mode_reg (tmode, op0);
14624 /* For -maltivec=be, emit a permute to swap the elements, followed
14625 by the store. */
14626 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14628 rtx temp = gen_reg_rtx (tmode);
14629 rtx sel = swap_selector_for_mode (tmode);
14630 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14631 UNSPEC_VPERM);
14632 emit_insn (gen_rtx_SET (temp, vperm));
14633 emit_insn (gen_rtx_SET (addr, temp));
14635 else
14636 emit_insn (gen_rtx_SET (addr, op0));
14638 else
14640 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14641 op0 = copy_to_mode_reg (smode, op0);
14643 if (op1 == const0_rtx)
14644 addr = gen_rtx_MEM (tmode, op2);
14645 else
14647 op1 = copy_to_mode_reg (mode1, op1);
14648 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14651 pat = GEN_FCN (icode) (addr, op0);
14652 if (pat)
14653 emit_insn (pat);
14656 return NULL_RTX;
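/* Usage sketch for the stvx path above (illustrative only; assumes
   altivec.h and -maltivec):  */
#if 0
#include <altivec.h>

void
store_example (int *buf)
{
  vector int v = { 1, 2, 3, 4 };
  /* vec_st reaches altivec_expand_stv_builtin via the STVX built-ins;
     the hardware ignores the low four bits of the effective address,
     which is what the AND with -16 expresses in the RTL.  */
  vec_st (v, 0, buf);
}
#endif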
14659 /* Return the appropriate SPR number associated with the given builtin. */
14660 static inline HOST_WIDE_INT
14661 htm_spr_num (enum rs6000_builtins code)
14663 if (code == HTM_BUILTIN_GET_TFHAR
14664 || code == HTM_BUILTIN_SET_TFHAR)
14665 return TFHAR_SPR;
14666 else if (code == HTM_BUILTIN_GET_TFIAR
14667 || code == HTM_BUILTIN_SET_TFIAR)
14668 return TFIAR_SPR;
14669 else if (code == HTM_BUILTIN_GET_TEXASR
14670 || code == HTM_BUILTIN_SET_TEXASR)
14671 return TEXASR_SPR;
14672 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14673 || code == HTM_BUILTIN_SET_TEXASRU);
14674 return TEXASRU_SPR;
14677 /* Return the appropriate SPR regno associated with the given builtin. */
14678 static inline HOST_WIDE_INT
14679 htm_spr_regno (enum rs6000_builtins code)
14681 if (code == HTM_BUILTIN_GET_TFHAR
14682 || code == HTM_BUILTIN_SET_TFHAR)
14683 return TFHAR_REGNO;
14684 else if (code == HTM_BUILTIN_GET_TFIAR
14685 || code == HTM_BUILTIN_SET_TFIAR)
14686 return TFIAR_REGNO;
14687 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14688 || code == HTM_BUILTIN_SET_TEXASR
14689 || code == HTM_BUILTIN_GET_TEXASRU
14690 || code == HTM_BUILTIN_SET_TEXASRU);
14691 return TEXASR_REGNO;
14694 /* Return the correct ICODE value depending on whether we are
14695 setting or reading the HTM SPRs. */
14696 static inline enum insn_code
14697 rs6000_htm_spr_icode (bool nonvoid)
14699 if (nonvoid)
14700 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14701 else
14702 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14705 /* Expand the HTM builtin in EXP and store the result in TARGET.
14706 Store true in *EXPANDEDP if we found a builtin to expand. */
14707 static rtx
14708 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14710 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14711 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14712 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14713 const struct builtin_description *d;
14714 size_t i;
14716 *expandedp = true;
14718 if (!TARGET_POWERPC64
14719 && (fcode == HTM_BUILTIN_TABORTDC
14720 || fcode == HTM_BUILTIN_TABORTDCI))
14722 size_t uns_fcode = (size_t)fcode;
14723 const char *name = rs6000_builtin_info[uns_fcode].name;
14724 error ("builtin %qs is only valid in 64-bit mode", name);
14725 return const0_rtx;
14728 /* Expand the HTM builtins. */
14729 d = bdesc_htm;
14730 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14731 if (d->code == fcode)
14733 rtx op[MAX_HTM_OPERANDS], pat;
14734 int nopnds = 0;
14735 tree arg;
14736 call_expr_arg_iterator iter;
14737 unsigned attr = rs6000_builtin_info[fcode].attr;
14738 enum insn_code icode = d->icode;
14739 const struct insn_operand_data *insn_op;
14740 bool uses_spr = (attr & RS6000_BTC_SPR);
14741 rtx cr = NULL_RTX;
14743 if (uses_spr)
14744 icode = rs6000_htm_spr_icode (nonvoid);
14745 insn_op = &insn_data[icode].operand[0];
14747 if (nonvoid)
14749 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14750 if (!target
14751 || GET_MODE (target) != tmode
14752 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14753 target = gen_reg_rtx (tmode);
14754 if (uses_spr)
14755 op[nopnds++] = target;
14758 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14760 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14761 return const0_rtx;
14763 insn_op = &insn_data[icode].operand[nopnds];
14765 op[nopnds] = expand_normal (arg);
14767 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14769 if (!strcmp (insn_op->constraint, "n"))
14771 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14772 if (!CONST_INT_P (op[nopnds]))
14773 error ("argument %d must be an unsigned literal", arg_num);
14774 else
14775 error ("argument %d is an unsigned literal that is "
14776 "out of range", arg_num);
14777 return const0_rtx;
14779 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14782 nopnds++;
14785 /* Handle the builtins for extended mnemonics. These accept
14786 no arguments, but map to builtins that take arguments. */
14787 switch (fcode)
14789 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14790 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14791 op[nopnds++] = GEN_INT (1);
14792 if (flag_checking)
14793 attr |= RS6000_BTC_UNARY;
14794 break;
14795 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14796 op[nopnds++] = GEN_INT (0);
14797 if (flag_checking)
14798 attr |= RS6000_BTC_UNARY;
14799 break;
14800 default:
14801 break;
14804 /* If this builtin accesses SPRs, then pass in the appropriate
14805 SPR number and SPR regno as the last two operands. */
14806 if (uses_spr)
14808 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14809 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14810 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14812 /* If this builtin accesses a CR, then pass in a scratch
14813 CR as the last operand. */
14814 else if (attr & RS6000_BTC_CR)
14815 { cr = gen_reg_rtx (CCmode);
14816 op[nopnds++] = cr;
14819 if (flag_checking)
14821 int expected_nopnds = 0;
14822 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14823 expected_nopnds = 1;
14824 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14825 expected_nopnds = 2;
14826 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14827 expected_nopnds = 3;
14828 if (!(attr & RS6000_BTC_VOID))
14829 expected_nopnds += 1;
14830 if (uses_spr)
14831 expected_nopnds += 2;
14833 gcc_assert (nopnds == expected_nopnds
14834 && nopnds <= MAX_HTM_OPERANDS);
14837 switch (nopnds)
14839 case 1:
14840 pat = GEN_FCN (icode) (op[0]);
14841 break;
14842 case 2:
14843 pat = GEN_FCN (icode) (op[0], op[1]);
14844 break;
14845 case 3:
14846 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14847 break;
14848 case 4:
14849 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14850 break;
14851 default:
14852 gcc_unreachable ();
14854 if (!pat)
14855 return NULL_RTX;
14856 emit_insn (pat);
14858 if (attr & RS6000_BTC_CR)
14860 if (fcode == HTM_BUILTIN_TBEGIN)
14862 /* Emit code to set TARGET to true or false depending on
14863 whether the tbegin. instruction succeeded or failed
14864 to start a transaction. We do this by placing the 1's
14865 complement of CR's EQ bit into TARGET. */
14866 rtx scratch = gen_reg_rtx (SImode);
14867 emit_insn (gen_rtx_SET (scratch,
14868 gen_rtx_EQ (SImode, cr,
14869 const0_rtx)));
14870 emit_insn (gen_rtx_SET (target,
14871 gen_rtx_XOR (SImode, scratch,
14872 GEN_INT (1))));
14874 else
14876 /* Emit code to copy the 4-bit condition register field
14877 CR into the least significant end of register TARGET. */
14878 rtx scratch1 = gen_reg_rtx (SImode);
14879 rtx scratch2 = gen_reg_rtx (SImode);
14880 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14881 emit_insn (gen_movcc (subreg, cr));
14882 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14883 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14887 if (nonvoid)
14888 return target;
14889 return const0_rtx;
14892 *expandedp = false;
14893 return NULL_RTX;
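/* Usage sketch for the CR handling above (illustrative only; requires
   -mhtm).  __builtin_tbegin returns nonzero exactly when the transaction
   started, which is the complement-of-EQ computation emitted for
   HTM_BUILTIN_TBEGIN:  */
#if 0
int
try_transaction (int *counter)
{
  if (__builtin_tbegin (0))
    {
      /* Transactional path.  */
      ++*counter;
      __builtin_tend (0);
      return 1;
    }
  /* The transaction failed to start.  */
  return 0;
}
#endif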
14896 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14898 static rtx
14899 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14900 rtx target)
14902 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14903 if (fcode == RS6000_BUILTIN_CPU_INIT)
14904 return const0_rtx;
14906 if (target == 0 || GET_MODE (target) != SImode)
14907 target = gen_reg_rtx (SImode);
14909 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14910 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14911 /* The target_clones attribute creates an ARRAY_REF instead of a STRING_CST;
14912 convert it back to a STRING_CST. */
14913 if (TREE_CODE (arg) == ARRAY_REF
14914 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14915 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14916 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14917 arg = TREE_OPERAND (arg, 0);
14919 if (TREE_CODE (arg) != STRING_CST)
14921 error ("builtin %qs only accepts a string argument",
14922 rs6000_builtin_info[(size_t) fcode].name);
14923 return const0_rtx;
14926 if (fcode == RS6000_BUILTIN_CPU_IS)
14928 const char *cpu = TREE_STRING_POINTER (arg);
14929 rtx cpuid = NULL_RTX;
14930 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14931 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14933 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14934 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14935 break;
14937 if (cpuid == NULL_RTX)
14939 /* Invalid CPU argument. */
14940 error ("cpu %qs is an invalid argument to builtin %qs",
14941 cpu, rs6000_builtin_info[(size_t) fcode].name);
14942 return const0_rtx;
14945 rtx platform = gen_reg_rtx (SImode);
14946 rtx tcbmem = gen_const_mem (SImode,
14947 gen_rtx_PLUS (Pmode,
14948 gen_rtx_REG (Pmode, TLS_REGNUM),
14949 GEN_INT (TCB_PLATFORM_OFFSET)));
14950 emit_move_insn (platform, tcbmem);
14951 emit_insn (gen_eqsi3 (target, platform, cpuid));
14953 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14955 const char *hwcap = TREE_STRING_POINTER (arg);
14956 rtx mask = NULL_RTX;
14957 int hwcap_offset;
14958 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14959 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14961 mask = GEN_INT (cpu_supports_info[i].mask);
14962 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14963 break;
14965 if (mask == NULL_RTX)
14967 /* Invalid HWCAP argument. */
14968 error ("%s %qs is an invalid argument to builtin %qs",
14969 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14970 return const0_rtx;
14973 rtx tcb_hwcap = gen_reg_rtx (SImode);
14974 rtx tcbmem = gen_const_mem (SImode,
14975 gen_rtx_PLUS (Pmode,
14976 gen_rtx_REG (Pmode, TLS_REGNUM),
14977 GEN_INT (hwcap_offset)));
14978 emit_move_insn (tcb_hwcap, tcbmem);
14979 rtx scratch1 = gen_reg_rtx (SImode);
14980 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14981 rtx scratch2 = gen_reg_rtx (SImode);
14982 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14983 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14985 else
14986 gcc_unreachable ();
14988 /* Record that we have expanded a CPU builtin, so that we can later
14989 emit a reference to the special symbol exported by LIBC to ensure we
14990 do not link against an old LIBC that doesn't support this feature. */
14991 cpu_builtin_p = true;
14993 #else
14994 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14995 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14997 /* For old LIBCs, always return FALSE. */
14998 emit_move_insn (target, GEN_INT (0));
14999 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15001 return target;
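/* Usage sketch for the CPU built-ins expanded above (illustrative only;
   the string arguments shown are among those accepted by cpu_is_info
   and cpu_supports_info):  */
#if 0
void
pick_kernel (void)
{
  __builtin_cpu_init ();	/* A nop on this target.  */
  if (__builtin_cpu_is ("power9"))
    ;				/* POWER9-tuned path.  */
  else if (__builtin_cpu_supports ("vsx"))
    ;				/* Generic VSX path.  */
}
#endif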
15004 static rtx
15005 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15007 rtx pat;
15008 tree arg0 = CALL_EXPR_ARG (exp, 0);
15009 tree arg1 = CALL_EXPR_ARG (exp, 1);
15010 tree arg2 = CALL_EXPR_ARG (exp, 2);
15011 rtx op0 = expand_normal (arg0);
15012 rtx op1 = expand_normal (arg1);
15013 rtx op2 = expand_normal (arg2);
15014 machine_mode tmode = insn_data[icode].operand[0].mode;
15015 machine_mode mode0 = insn_data[icode].operand[1].mode;
15016 machine_mode mode1 = insn_data[icode].operand[2].mode;
15017 machine_mode mode2 = insn_data[icode].operand[3].mode;
15019 if (icode == CODE_FOR_nothing)
15020 /* Builtin not supported on this processor. */
15021 return 0;
15023 /* If we got invalid arguments bail out before generating bad rtl. */
15024 if (arg0 == error_mark_node
15025 || arg1 == error_mark_node
15026 || arg2 == error_mark_node)
15027 return const0_rtx;
15029 /* Check and prepare the argument depending on the instruction code.
15031 Note that a switch statement instead of the sequence of tests
15032 would be incorrect, as many of the CODE_FOR values could be
15033 CODE_FOR_nothing, and that would yield multiple case alternatives
15034 with identical values. We'd never reach here at runtime in
15035 that case. */
15036 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15037 || icode == CODE_FOR_altivec_vsldoi_v2df
15038 || icode == CODE_FOR_altivec_vsldoi_v4si
15039 || icode == CODE_FOR_altivec_vsldoi_v8hi
15040 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15042 /* Only allow 4-bit unsigned literals. */
15043 STRIP_NOPS (arg2);
15044 if (TREE_CODE (arg2) != INTEGER_CST
15045 || TREE_INT_CST_LOW (arg2) & ~0xf)
15047 error ("argument 3 must be a 4-bit unsigned literal");
15048 return CONST0_RTX (tmode);
15051 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15052 || icode == CODE_FOR_vsx_xxpermdi_v2di
15053 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15054 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15055 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15056 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15057 || icode == CODE_FOR_vsx_xxpermdi_v4si
15058 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15059 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15060 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15061 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15062 || icode == CODE_FOR_vsx_xxsldwi_v4si
15063 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15064 || icode == CODE_FOR_vsx_xxsldwi_v2di
15065 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15067 /* Only allow 2-bit unsigned literals. */
15068 STRIP_NOPS (arg2);
15069 if (TREE_CODE (arg2) != INTEGER_CST
15070 || TREE_INT_CST_LOW (arg2) & ~0x3)
15072 error ("argument 3 must be a 2-bit unsigned literal");
15073 return CONST0_RTX (tmode);
15076 else if (icode == CODE_FOR_vsx_set_v2df
15077 || icode == CODE_FOR_vsx_set_v2di
15078 || icode == CODE_FOR_bcdadd
15079 || icode == CODE_FOR_bcdadd_lt
15080 || icode == CODE_FOR_bcdadd_eq
15081 || icode == CODE_FOR_bcdadd_gt
15082 || icode == CODE_FOR_bcdsub
15083 || icode == CODE_FOR_bcdsub_lt
15084 || icode == CODE_FOR_bcdsub_eq
15085 || icode == CODE_FOR_bcdsub_gt)
15087 /* Only allow 1-bit unsigned literals. */
15088 STRIP_NOPS (arg2);
15089 if (TREE_CODE (arg2) != INTEGER_CST
15090 || TREE_INT_CST_LOW (arg2) & ~0x1)
15092 error ("argument 3 must be a 1-bit unsigned literal");
15093 return CONST0_RTX (tmode);
15096 else if (icode == CODE_FOR_dfp_ddedpd_dd
15097 || icode == CODE_FOR_dfp_ddedpd_td)
15099 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15100 STRIP_NOPS (arg0);
15101 if (TREE_CODE (arg0) != INTEGER_CST
15102 || TREE_INT_CST_LOW (arg0) & ~0x3)
15104 error ("argument 1 must be 0 or 2");
15105 return CONST0_RTX (tmode);
15108 else if (icode == CODE_FOR_dfp_denbcd_dd
15109 || icode == CODE_FOR_dfp_denbcd_td)
15111 /* Only allow 1-bit unsigned literals. */
15112 STRIP_NOPS (arg0);
15113 if (TREE_CODE (arg0) != INTEGER_CST
15114 || TREE_INT_CST_LOW (arg0) & ~0x1)
15116 error ("argument 1 must be a 1-bit unsigned literal");
15117 return CONST0_RTX (tmode);
15120 else if (icode == CODE_FOR_dfp_dscli_dd
15121 || icode == CODE_FOR_dfp_dscli_td
15122 || icode == CODE_FOR_dfp_dscri_dd
15123 || icode == CODE_FOR_dfp_dscri_td)
15125 /* Only allow 6-bit unsigned literals. */
15126 STRIP_NOPS (arg1);
15127 if (TREE_CODE (arg1) != INTEGER_CST
15128 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15130 error ("argument 2 must be a 6-bit unsigned literal");
15131 return CONST0_RTX (tmode);
15134 else if (icode == CODE_FOR_crypto_vshasigmaw
15135 || icode == CODE_FOR_crypto_vshasigmad)
15137 /* Check whether the 2nd and 3rd arguments are integer constants and in
15138 range and prepare arguments. */
15139 STRIP_NOPS (arg1);
15140 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
15142 error ("argument 2 must be 0 or 1");
15143 return CONST0_RTX (tmode);
15146 STRIP_NOPS (arg2);
15147 if (TREE_CODE (arg2) != INTEGER_CST
15148 || wi::geu_p (wi::to_wide (arg2), 16))
15150 error ("argument 3 must be in the range 0..15");
15151 return CONST0_RTX (tmode);
15155 if (target == 0
15156 || GET_MODE (target) != tmode
15157 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15158 target = gen_reg_rtx (tmode);
15160 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15161 op0 = copy_to_mode_reg (mode0, op0);
15162 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15163 op1 = copy_to_mode_reg (mode1, op1);
15164 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15165 op2 = copy_to_mode_reg (mode2, op2);
15167 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15168 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15169 else
15170 pat = GEN_FCN (icode) (target, op0, op1, op2);
15171 if (! pat)
15172 return 0;
15173 emit_insn (pat);
15175 return target;
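/* Usage sketch for the literal-range checks above (illustrative only):
   vec_sld maps onto the altivec_vsldoi_* patterns, so its third argument
   must be a compile-time constant that fits in 4 bits:  */
#if 0
#include <altivec.h>

vector int
shift_example (vector int a, vector int b)
{
  /* OK: 3 fits in 4 bits.  A non-constant shift count, or one wider
     than 4 bits, is rejected by the checks above.  */
  return vec_sld (a, b, 3);
}
#endif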
15178 /* Expand the lvx builtins. */
15179 static rtx
15180 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15182 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15183 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15184 tree arg0;
15185 machine_mode tmode, mode0;
15186 rtx pat, op0;
15187 enum insn_code icode;
15189 switch (fcode)
15191 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15192 icode = CODE_FOR_vector_altivec_load_v16qi;
15193 break;
15194 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15195 icode = CODE_FOR_vector_altivec_load_v8hi;
15196 break;
15197 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15198 icode = CODE_FOR_vector_altivec_load_v4si;
15199 break;
15200 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15201 icode = CODE_FOR_vector_altivec_load_v4sf;
15202 break;
15203 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15204 icode = CODE_FOR_vector_altivec_load_v2df;
15205 break;
15206 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15207 icode = CODE_FOR_vector_altivec_load_v2di;
15208 break;
15209 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15210 icode = CODE_FOR_vector_altivec_load_v1ti;
15211 break;
15212 default:
15213 *expandedp = false;
15214 return NULL_RTX;
15217 *expandedp = true;
15219 arg0 = CALL_EXPR_ARG (exp, 0);
15220 op0 = expand_normal (arg0);
15221 tmode = insn_data[icode].operand[0].mode;
15222 mode0 = insn_data[icode].operand[1].mode;
15224 if (target == 0
15225 || GET_MODE (target) != tmode
15226 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15227 target = gen_reg_rtx (tmode);
15229 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15230 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15232 pat = GEN_FCN (icode) (target, op0);
15233 if (! pat)
15234 return 0;
15235 emit_insn (pat);
15236 return target;
15239 /* Expand the stvx builtins. */
15240 static rtx
15241 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15242 bool *expandedp)
15244 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15245 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15246 tree arg0, arg1;
15247 machine_mode mode0, mode1;
15248 rtx pat, op0, op1;
15249 enum insn_code icode;
15251 switch (fcode)
15253 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15254 icode = CODE_FOR_vector_altivec_store_v16qi;
15255 break;
15256 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15257 icode = CODE_FOR_vector_altivec_store_v8hi;
15258 break;
15259 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15260 icode = CODE_FOR_vector_altivec_store_v4si;
15261 break;
15262 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15263 icode = CODE_FOR_vector_altivec_store_v4sf;
15264 break;
15265 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15266 icode = CODE_FOR_vector_altivec_store_v2df;
15267 break;
15268 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15269 icode = CODE_FOR_vector_altivec_store_v2di;
15270 break;
15271 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15272 icode = CODE_FOR_vector_altivec_store_v1ti;
15273 break;
15274 default:
15275 *expandedp = false;
15276 return NULL_RTX;
15279 arg0 = CALL_EXPR_ARG (exp, 0);
15280 arg1 = CALL_EXPR_ARG (exp, 1);
15281 op0 = expand_normal (arg0);
15282 op1 = expand_normal (arg1);
15283 mode0 = insn_data[icode].operand[0].mode;
15284 mode1 = insn_data[icode].operand[1].mode;
15286 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15287 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15288 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15289 op1 = copy_to_mode_reg (mode1, op1);
15291 pat = GEN_FCN (icode) (op0, op1);
15292 if (pat)
15293 emit_insn (pat);
15295 *expandedp = true;
15296 return NULL_RTX;
15299 /* Expand the dst builtins. */
15300 static rtx
15301 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15302 bool *expandedp)
15304 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15305 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15306 tree arg0, arg1, arg2;
15307 machine_mode mode0, mode1;
15308 rtx pat, op0, op1, op2;
15309 const struct builtin_description *d;
15310 size_t i;
15312 *expandedp = false;
15314 /* Handle DST variants. */
15315 d = bdesc_dst;
15316 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15317 if (d->code == fcode)
15319 arg0 = CALL_EXPR_ARG (exp, 0);
15320 arg1 = CALL_EXPR_ARG (exp, 1);
15321 arg2 = CALL_EXPR_ARG (exp, 2);
15322 op0 = expand_normal (arg0);
15323 op1 = expand_normal (arg1);
15324 op2 = expand_normal (arg2);
15325 mode0 = insn_data[d->icode].operand[0].mode;
15326 mode1 = insn_data[d->icode].operand[1].mode;
15328 /* Invalid arguments, bail out before generating bad rtl. */
15329 if (arg0 == error_mark_node
15330 || arg1 == error_mark_node
15331 || arg2 == error_mark_node)
15332 return const0_rtx;
15334 *expandedp = true;
15335 STRIP_NOPS (arg2);
15336 if (TREE_CODE (arg2) != INTEGER_CST
15337 || TREE_INT_CST_LOW (arg2) & ~0x3)
15339 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15340 return const0_rtx;
15343 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15344 op0 = copy_to_mode_reg (Pmode, op0);
15345 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15346 op1 = copy_to_mode_reg (mode1, op1);
15348 pat = GEN_FCN (d->icode) (op0, op1, op2);
15349 if (pat != 0)
15350 emit_insn (pat);
15352 return NULL_RTX;
15355 return NULL_RTX;
15358 /* Expand vec_init builtin. */
15359 static rtx
15360 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15362 machine_mode tmode = TYPE_MODE (type);
15363 machine_mode inner_mode = GET_MODE_INNER (tmode);
15364 int i, n_elt = GET_MODE_NUNITS (tmode);
15366 gcc_assert (VECTOR_MODE_P (tmode));
15367 gcc_assert (n_elt == call_expr_nargs (exp));
15369 if (!target || !register_operand (target, tmode))
15370 target = gen_reg_rtx (tmode);
15372 /* If we have a vector consisting of a single element, such as V1TImode, do
15373 the initialization directly. */
15374 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15376 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15377 emit_move_insn (target, gen_lowpart (tmode, x));
15379 else
15381 rtvec v = rtvec_alloc (n_elt);
15383 for (i = 0; i < n_elt; ++i)
15385 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15386 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15389 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15392 return target;
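/* For illustration (a sketch, not from the original source): a vector
   initializer in user code, such as

       vector int v = { a, b, c, d };

   is lowered through the same rs6000_expand_vector_init path used by
   the VEC_INIT built-ins expanded above.  */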
15395 /* Return the integer constant in ARG. Constrain it to be in the range
15396 of the subparts of VEC_TYPE; issue an error if not. */
15398 static int
15399 get_element_number (tree vec_type, tree arg)
15401 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15403 if (!tree_fits_uhwi_p (arg)
15404 || (elt = tree_to_uhwi (arg), elt > max))
15406 error ("selector must be an integer constant in the range 0..%wi", max);
15407 return 0;
15410 return elt;
15413 /* Expand vec_set builtin. */
15414 static rtx
15415 altivec_expand_vec_set_builtin (tree exp)
15417 machine_mode tmode, mode1;
15418 tree arg0, arg1, arg2;
15419 int elt;
15420 rtx op0, op1;
15422 arg0 = CALL_EXPR_ARG (exp, 0);
15423 arg1 = CALL_EXPR_ARG (exp, 1);
15424 arg2 = CALL_EXPR_ARG (exp, 2);
15426 tmode = TYPE_MODE (TREE_TYPE (arg0));
15427 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15428 gcc_assert (VECTOR_MODE_P (tmode));
15430 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15431 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15432 elt = get_element_number (TREE_TYPE (arg0), arg2);
15434 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15435 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15437 op0 = force_reg (tmode, op0);
15438 op1 = force_reg (mode1, op1);
15440 rs6000_expand_vector_set (op0, op1, elt);
15442 return op0;
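/* Usage sketch (illustrative only): vec_insert supplies the three
   operands seen above -- the scalar value, the vector, and the element
   number validated by get_element_number:  */
#if 0
#include <altivec.h>

vector int
set_example (vector int v, int x)
{
  /* Replace element 2; selectors outside 0..3 are diagnosed.  */
  return vec_insert (x, v, 2);
}
#endif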
15445 /* Expand vec_ext builtin. */
15446 static rtx
15447 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15449 machine_mode tmode, mode0;
15450 tree arg0, arg1;
15451 rtx op0;
15452 rtx op1;
15454 arg0 = CALL_EXPR_ARG (exp, 0);
15455 arg1 = CALL_EXPR_ARG (exp, 1);
15457 op0 = expand_normal (arg0);
15458 op1 = expand_normal (arg1);
15460 /* Call get_element_number to validate arg1 if it is a constant. */
15461 if (TREE_CODE (arg1) == INTEGER_CST)
15462 (void) get_element_number (TREE_TYPE (arg0), arg1);
15464 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15465 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15466 gcc_assert (VECTOR_MODE_P (mode0));
15468 op0 = force_reg (mode0, op0);
15470 if (optimize || !target || !register_operand (target, tmode))
15471 target = gen_reg_rtx (tmode);
15473 rs6000_expand_vector_extract (target, op0, op1);
15475 return target;
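/* Usage sketch (illustrative only):  */
#if 0
#include <altivec.h>

int
ext_example (vector int v)
{
  /* Extract element 1; a constant selector is range-checked by
     get_element_number above.  */
  return vec_extract (v, 1);
}
#endif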
15478 /* Expand the builtin in EXP and store the result in TARGET. Store
15479 true in *EXPANDEDP if we found a builtin to expand. */
15480 static rtx
15481 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15483 const struct builtin_description *d;
15484 size_t i;
15485 enum insn_code icode;
15486 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15487 tree arg0, arg1, arg2;
15488 rtx op0, pat;
15489 machine_mode tmode, mode0;
15490 enum rs6000_builtins fcode
15491 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15493 if (rs6000_overloaded_builtin_p (fcode))
15495 *expandedp = true;
15496 error ("unresolved overload for Altivec builtin %qF", fndecl);
15498 /* Given it is invalid, just generate a normal call. */
15499 return expand_call (exp, target, false);
15502 target = altivec_expand_ld_builtin (exp, target, expandedp);
15503 if (*expandedp)
15504 return target;
15506 target = altivec_expand_st_builtin (exp, target, expandedp);
15507 if (*expandedp)
15508 return target;
15510 target = altivec_expand_dst_builtin (exp, target, expandedp);
15511 if (*expandedp)
15512 return target;
15514 *expandedp = true;
15516 switch (fcode)
15518 case ALTIVEC_BUILTIN_STVX_V2DF:
15519 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15520 case ALTIVEC_BUILTIN_STVX_V2DI:
15521 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15522 case ALTIVEC_BUILTIN_STVX_V4SF:
15523 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15524 case ALTIVEC_BUILTIN_STVX:
15525 case ALTIVEC_BUILTIN_STVX_V4SI:
15526 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15527 case ALTIVEC_BUILTIN_STVX_V8HI:
15528 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15529 case ALTIVEC_BUILTIN_STVX_V16QI:
15530 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15531 case ALTIVEC_BUILTIN_STVEBX:
15532 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15533 case ALTIVEC_BUILTIN_STVEHX:
15534 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15535 case ALTIVEC_BUILTIN_STVEWX:
15536 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15537 case ALTIVEC_BUILTIN_STVXL_V2DF:
15538 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15539 case ALTIVEC_BUILTIN_STVXL_V2DI:
15540 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15541 case ALTIVEC_BUILTIN_STVXL_V4SF:
15542 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15543 case ALTIVEC_BUILTIN_STVXL:
15544 case ALTIVEC_BUILTIN_STVXL_V4SI:
15545 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15546 case ALTIVEC_BUILTIN_STVXL_V8HI:
15547 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15548 case ALTIVEC_BUILTIN_STVXL_V16QI:
15549 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15551 case ALTIVEC_BUILTIN_STVLX:
15552 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15553 case ALTIVEC_BUILTIN_STVLXL:
15554 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15555 case ALTIVEC_BUILTIN_STVRX:
15556 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15557 case ALTIVEC_BUILTIN_STVRXL:
15558 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15560 case P9V_BUILTIN_STXVL:
15561 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15563 case P9V_BUILTIN_XST_LEN_R:
15564 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
15566 case VSX_BUILTIN_STXVD2X_V1TI:
15567 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15568 case VSX_BUILTIN_STXVD2X_V2DF:
15569 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15570 case VSX_BUILTIN_STXVD2X_V2DI:
15571 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15572 case VSX_BUILTIN_STXVW4X_V4SF:
15573 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15574 case VSX_BUILTIN_STXVW4X_V4SI:
15575 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15576 case VSX_BUILTIN_STXVW4X_V8HI:
15577 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15578 case VSX_BUILTIN_STXVW4X_V16QI:
15579 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15581 /* For the following on big endian, it's ok to use any appropriate
15582 unaligned-supporting store, so use a generic expander. For
15583 little-endian, the exact element-reversing instruction must
15584 be used. */
15585 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15587 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15588 : CODE_FOR_vsx_st_elemrev_v2df);
15589 return altivec_expand_stv_builtin (code, exp);
15591 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15593 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15594 : CODE_FOR_vsx_st_elemrev_v2di);
15595 return altivec_expand_stv_builtin (code, exp);
15597 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15599 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15600 : CODE_FOR_vsx_st_elemrev_v4sf);
15601 return altivec_expand_stv_builtin (code, exp);
15603 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15605 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15606 : CODE_FOR_vsx_st_elemrev_v4si);
15607 return altivec_expand_stv_builtin (code, exp);
15609 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15611 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15612 : CODE_FOR_vsx_st_elemrev_v8hi);
15613 return altivec_expand_stv_builtin (code, exp);
15615 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15617 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15618 : CODE_FOR_vsx_st_elemrev_v16qi);
15619 return altivec_expand_stv_builtin (code, exp);
15622 case ALTIVEC_BUILTIN_MFVSCR:
15623 icode = CODE_FOR_altivec_mfvscr;
15624 tmode = insn_data[icode].operand[0].mode;
15626 if (target == 0
15627 || GET_MODE (target) != tmode
15628 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15629 target = gen_reg_rtx (tmode);
15631 pat = GEN_FCN (icode) (target);
15632 if (! pat)
15633 return 0;
15634 emit_insn (pat);
15635 return target;
15637 case ALTIVEC_BUILTIN_MTVSCR:
15638 icode = CODE_FOR_altivec_mtvscr;
15639 arg0 = CALL_EXPR_ARG (exp, 0);
15640 op0 = expand_normal (arg0);
15641 mode0 = insn_data[icode].operand[0].mode;
15643 /* If we got invalid arguments bail out before generating bad rtl. */
15644 if (arg0 == error_mark_node)
15645 return const0_rtx;
15647 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15648 op0 = copy_to_mode_reg (mode0, op0);
15650 pat = GEN_FCN (icode) (op0);
15651 if (pat)
15652 emit_insn (pat);
15653 return NULL_RTX;
15655 case ALTIVEC_BUILTIN_DSSALL:
15656 emit_insn (gen_altivec_dssall ());
15657 return NULL_RTX;
15659 case ALTIVEC_BUILTIN_DSS:
15660 icode = CODE_FOR_altivec_dss;
15661 arg0 = CALL_EXPR_ARG (exp, 0);
15662 STRIP_NOPS (arg0);
15663 op0 = expand_normal (arg0);
15664 mode0 = insn_data[icode].operand[0].mode;
15666 /* If we got invalid arguments bail out before generating bad rtl. */
15667 if (arg0 == error_mark_node)
15668 return const0_rtx;
15670 if (TREE_CODE (arg0) != INTEGER_CST
15671 || TREE_INT_CST_LOW (arg0) & ~0x3)
15673 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15674 return const0_rtx;
15677 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15678 op0 = copy_to_mode_reg (mode0, op0);
15680 emit_insn (gen_altivec_dss (op0));
15681 return NULL_RTX;
15683 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15684 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15685 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15686 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15687 case VSX_BUILTIN_VEC_INIT_V2DF:
15688 case VSX_BUILTIN_VEC_INIT_V2DI:
15689 case VSX_BUILTIN_VEC_INIT_V1TI:
15690 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15692 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15693 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15694 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15695 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15696 case VSX_BUILTIN_VEC_SET_V2DF:
15697 case VSX_BUILTIN_VEC_SET_V2DI:
15698 case VSX_BUILTIN_VEC_SET_V1TI:
15699 return altivec_expand_vec_set_builtin (exp);
15701 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15702 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15703 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15704 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15705 case VSX_BUILTIN_VEC_EXT_V2DF:
15706 case VSX_BUILTIN_VEC_EXT_V2DI:
15707 case VSX_BUILTIN_VEC_EXT_V1TI:
15708 return altivec_expand_vec_ext_builtin (exp, target);
15710 case P9V_BUILTIN_VEXTRACT4B:
15711 case P9V_BUILTIN_VEC_VEXTRACT4B:
15712 arg1 = CALL_EXPR_ARG (exp, 1);
15713 STRIP_NOPS (arg1);
15715 /* Generate a normal call if it is invalid. */
15716 if (arg1 == error_mark_node)
15717 return expand_call (exp, target, false);
15719 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15721 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15722 return expand_call (exp, target, false);
15724 break;
15726 case P9V_BUILTIN_VINSERT4B:
15727 case P9V_BUILTIN_VINSERT4B_DI:
15728 case P9V_BUILTIN_VEC_VINSERT4B:
15729 arg2 = CALL_EXPR_ARG (exp, 2);
15730 STRIP_NOPS (arg2);
15732 /* Generate a normal call if it is invalid. */
15733 if (arg2 == error_mark_node)
15734 return expand_call (exp, target, false);
15736 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15738 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15739 return expand_call (exp, target, false);
15741 break;
15743 default:
15744 /* Fall through to the table-driven expansion below. */
15745 break;
15748 /* Expand abs* operations. */
15749 d = bdesc_abs;
15750 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15751 if (d->code == fcode)
15752 return altivec_expand_abs_builtin (d->icode, exp, target);
15754 /* Expand the AltiVec predicates. */
15755 d = bdesc_altivec_preds;
15756 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15757 if (d->code == fcode)
15758 return altivec_expand_predicate_builtin (d->icode, exp, target);
15760 /* The LV* built-ins are initialized differently from the table-driven ones, so expand them individually here. */
15761 switch (fcode)
15763 case ALTIVEC_BUILTIN_LVSL:
15764 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15765 exp, target, false);
15766 case ALTIVEC_BUILTIN_LVSR:
15767 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15768 exp, target, false);
15769 case ALTIVEC_BUILTIN_LVEBX:
15770 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15771 exp, target, false);
15772 case ALTIVEC_BUILTIN_LVEHX:
15773 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15774 exp, target, false);
15775 case ALTIVEC_BUILTIN_LVEWX:
15776 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15777 exp, target, false);
15778 case ALTIVEC_BUILTIN_LVXL_V2DF:
15779 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15780 exp, target, false);
15781 case ALTIVEC_BUILTIN_LVXL_V2DI:
15782 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15783 exp, target, false);
15784 case ALTIVEC_BUILTIN_LVXL_V4SF:
15785 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15786 exp, target, false);
15787 case ALTIVEC_BUILTIN_LVXL:
15788 case ALTIVEC_BUILTIN_LVXL_V4SI:
15789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15790 exp, target, false);
15791 case ALTIVEC_BUILTIN_LVXL_V8HI:
15792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15793 exp, target, false);
15794 case ALTIVEC_BUILTIN_LVXL_V16QI:
15795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15796 exp, target, false);
15797 case ALTIVEC_BUILTIN_LVX_V2DF:
15798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15799 exp, target, false);
15800 case ALTIVEC_BUILTIN_LVX_V2DI:
15801 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15802 exp, target, false);
15803 case ALTIVEC_BUILTIN_LVX_V4SF:
15804 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15805 exp, target, false);
15806 case ALTIVEC_BUILTIN_LVX:
15807 case ALTIVEC_BUILTIN_LVX_V4SI:
15808 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15809 exp, target, false);
15810 case ALTIVEC_BUILTIN_LVX_V8HI:
15811 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15812 exp, target, false);
15813 case ALTIVEC_BUILTIN_LVX_V16QI:
15814 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15815 exp, target, false);
15816 case ALTIVEC_BUILTIN_LVLX:
15817 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15818 exp, target, true);
15819 case ALTIVEC_BUILTIN_LVLXL:
15820 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15821 exp, target, true);
15822 case ALTIVEC_BUILTIN_LVRX:
15823 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15824 exp, target, true);
15825 case ALTIVEC_BUILTIN_LVRXL:
15826 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15827 exp, target, true);
15828 case VSX_BUILTIN_LXVD2X_V1TI:
15829 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15830 exp, target, false);
15831 case VSX_BUILTIN_LXVD2X_V2DF:
15832 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15833 exp, target, false);
15834 case VSX_BUILTIN_LXVD2X_V2DI:
15835 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15836 exp, target, false);
15837 case VSX_BUILTIN_LXVW4X_V4SF:
15838 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15839 exp, target, false);
15840 case VSX_BUILTIN_LXVW4X_V4SI:
15841 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15842 exp, target, false);
15843 case VSX_BUILTIN_LXVW4X_V8HI:
15844 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15845 exp, target, false);
15846 case VSX_BUILTIN_LXVW4X_V16QI:
15847 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15848 exp, target, false);
15849 /* For the following on big endian, it's ok to use any appropriate
15850 unaligned-supporting load, so use a generic expander. For
15851 little-endian, the exact element-reversing instruction must
15852 be used. */
15853 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15855 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15856 : CODE_FOR_vsx_ld_elemrev_v2df);
15857 return altivec_expand_lv_builtin (code, exp, target, false);
15859 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15861 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15862 : CODE_FOR_vsx_ld_elemrev_v2di);
15863 return altivec_expand_lv_builtin (code, exp, target, false);
15865 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15867 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15868 : CODE_FOR_vsx_ld_elemrev_v4sf);
15869 return altivec_expand_lv_builtin (code, exp, target, false);
15871 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15873 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15874 : CODE_FOR_vsx_ld_elemrev_v4si);
15875 return altivec_expand_lv_builtin (code, exp, target, false);
15877 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15879 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15880 : CODE_FOR_vsx_ld_elemrev_v8hi);
15881 return altivec_expand_lv_builtin (code, exp, target, false);
15883 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15885 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15886 : CODE_FOR_vsx_ld_elemrev_v16qi);
15887 return altivec_expand_lv_builtin (code, exp, target, false);
15889 break;
15890 default:
15891 /* Fall through to the code after the switch. */
15892 break;
15895 *expandedp = false;
15896 return NULL_RTX;
15899 /* Expand the builtin in EXP and store the result in TARGET. Store
15900 true in *EXPANDEDP if we found a builtin to expand. */
15901 static rtx
15902 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15904 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15905 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15906 const struct builtin_description *d;
15907 size_t i;
15909 *expandedp = true;
15911 switch (fcode)
15913 case PAIRED_BUILTIN_STX:
15914 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15915 case PAIRED_BUILTIN_LX:
15916 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15917 default:
15918 /* Fall through to the table-driven predicate expansion below. */
15919 break;
15922 /* Expand the paired predicates. */
15923 d = bdesc_paired_preds;
15924 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15925 if (d->code == fcode)
15926 return paired_expand_predicate_builtin (d->icode, exp, target);
15928 *expandedp = false;
15929 return NULL_RTX;
15932 static rtx
15933 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15935 rtx pat, scratch, tmp;
15936 tree form = CALL_EXPR_ARG (exp, 0);
15937 tree arg0 = CALL_EXPR_ARG (exp, 1);
15938 tree arg1 = CALL_EXPR_ARG (exp, 2);
15939 rtx op0 = expand_normal (arg0);
15940 rtx op1 = expand_normal (arg1);
15941 machine_mode mode0 = insn_data[icode].operand[1].mode;
15942 machine_mode mode1 = insn_data[icode].operand[2].mode;
15943 int form_int;
15944 enum rtx_code code;
15946 if (TREE_CODE (form) != INTEGER_CST)
15948 error ("argument 1 of %qs must be a constant",
15949 "__builtin_paired_predicate");
15950 return const0_rtx;
15952 else
15953 form_int = TREE_INT_CST_LOW (form);
15955 gcc_assert (mode0 == mode1);
15957 if (arg0 == error_mark_node || arg1 == error_mark_node)
15958 return const0_rtx;
15960 if (target == 0
15961 || GET_MODE (target) != SImode
15962 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15963 target = gen_reg_rtx (SImode);
15964 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15965 op0 = copy_to_mode_reg (mode0, op0);
15966 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15967 op1 = copy_to_mode_reg (mode1, op1);
15969 scratch = gen_reg_rtx (CCFPmode);
15971 pat = GEN_FCN (icode) (scratch, op0, op1);
15972 if (!pat)
15973 return const0_rtx;
15975 emit_insn (pat);
15977 switch (form_int)
15979 /* LT bit. */
15980 case 0:
15981 code = LT;
15982 break;
15983 /* GT bit. */
15984 case 1:
15985 code = GT;
15986 break;
15987 /* EQ bit. */
15988 case 2:
15989 code = EQ;
15990 break;
15991 /* UN bit. */
15992 case 3:
15993 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15994 return target;
15995 default:
15996 error ("argument 1 of %qs is out of range",
15997 "__builtin_paired_predicate");
15998 return const0_rtx;
16001 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16002 emit_move_insn (target, tmp);
16003 return target;
16006 /* Raise an error message for a builtin function that is called without the
16007 appropriate target options being set. */
16009 static void
16010 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16012 size_t uns_fncode = (size_t) fncode;
16013 const char *name = rs6000_builtin_info[uns_fncode].name;
16014 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16016 gcc_assert (name != NULL);
16017 if ((fnmask & RS6000_BTM_CELL) != 0)
16018 error ("builtin function %qs is only valid for the cell processor", name);
16019 else if ((fnmask & RS6000_BTM_VSX) != 0)
16020 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16021 else if ((fnmask & RS6000_BTM_HTM) != 0)
16022 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16023 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16024 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16025 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16026 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16027 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16028 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16029 error ("builtin function %qs requires the %qs and %qs options",
16030 name, "-mhard-dfp", "-mpower8-vector");
16031 else if ((fnmask & RS6000_BTM_DFP) != 0)
16032 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16033 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16034 error ("builtin function %qs requires the %qs option", name,
16035 "-mpower8-vector");
16036 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16037 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16038 error ("builtin function %qs requires the %qs and %qs options",
16039 name, "-mcpu=power9", "-m64");
16040 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16041 error ("builtin function %qs requires the %qs option", name,
16042 "-mcpu=power9");
16043 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16044 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16045 error ("builtin function %qs requires the %qs and %qs options",
16046 name, "-mcpu=power9", "-m64");
16047 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16048 error ("builtin function %qs requires the %qs option", name,
16049 "-mcpu=power9");
16050 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16051 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16052 error ("builtin function %qs requires the %qs and %qs options",
16053 name, "-mhard-float", "-mlong-double-128");
16054 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16055 error ("builtin function %qs requires the %qs option", name,
16056 "-mhard-float");
16057 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
16058 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
16059 name);
16060 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16061 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16062 else
16063 error ("builtin function %qs is not supported with the current options",
16064 name);
16067 /* Target hook for early folding of built-ins, shamelessly stolen
16068 from ia64.c. */
16070 static tree
16071 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
16072 int n_args ATTRIBUTE_UNUSED,
16073 tree *args ATTRIBUTE_UNUSED,
16074 bool ignore ATTRIBUTE_UNUSED)
16076 #ifdef SUBTARGET_FOLD_BUILTIN
16077 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16078 #else
16079 return NULL_TREE;
16080 #endif
16083 /* Helper function to sort out which built-ins may be valid without having
16084 a LHS. */
16085 static bool
16086 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
16088 switch (fn_code)
16090 case ALTIVEC_BUILTIN_STVX_V16QI:
16091 case ALTIVEC_BUILTIN_STVX_V8HI:
16092 case ALTIVEC_BUILTIN_STVX_V4SI:
16093 case ALTIVEC_BUILTIN_STVX_V4SF:
16094 case ALTIVEC_BUILTIN_STVX_V2DI:
16095 case ALTIVEC_BUILTIN_STVX_V2DF:
16096 return true;
16097 default:
16098 return false;
16102 /* Helper function to handle the gimple folding of a vector compare
16103 operation. This sets up true/false vectors, and uses the
16104 VEC_COND_EXPR operation.
16105 CODE indicates which comparison is to be made. (EQ, GT, ...).
16106 TYPE indicates the type of the result. */
16107 static tree
16108 fold_build_vec_cmp (tree_code code, tree type,
16109 tree arg0, tree arg1)
16111 tree cmp_type = build_same_sized_truth_vector_type (type);
16112 tree zero_vec = build_zero_cst (type);
16113 tree minus_one_vec = build_minus_one_cst (type);
16114 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
16115 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
16118 /* Helper function to handle the in-between steps for the
16119 vector compare built-ins. */
16120 static void
16121 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
16123 tree arg0 = gimple_call_arg (stmt, 0);
16124 tree arg1 = gimple_call_arg (stmt, 1);
16125 tree lhs = gimple_call_lhs (stmt);
16126 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
16127 gimple *g = gimple_build_assign (lhs, cmp);
16128 gimple_set_location (g, gimple_location (stmt));
16129 gsi_replace (gsi, g, true);
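/* Usage sketch (illustrative only): the comparison built-ins folded
   through fold_compare_helper yield a per-element all-ones/all-zeros
   mask:  */
#if 0
#include <altivec.h>

vector bool int
cmp_example (vector int a, vector int b)
{
  /* Folded to a VEC_COND_EXPR over an EQ comparison: each result
     element is -1 where a[i] == b[i] and 0 elsewhere.  */
  return vec_cmpeq (a, b);
}
#endif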
16132 /* Helper function to handle the vector merge[hl] built-ins. The
16133 implementation difference between the h and l versions lies in the
16134 values used when building the permute vector for the high-word versus
16135 low-word merge. The variance is keyed off the use_high parameter. */
16136 static void
16137 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
16139 tree arg0 = gimple_call_arg (stmt, 0);
16140 tree arg1 = gimple_call_arg (stmt, 1);
16141 tree lhs = gimple_call_lhs (stmt);
16142 tree lhs_type = TREE_TYPE (lhs);
16143 tree lhs_type_type = TREE_TYPE (lhs_type);
16144 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
16145 int midpoint = n_elts / 2;
16146 int offset = 0;
16148 if (use_high == 1)
16149 offset = midpoint;
16151 tree_vector_builder elts (lhs_type, VECTOR_CST_NELTS (arg0), 1);
16153 for (int i = 0; i < midpoint; i++)
16155 elts.safe_push (build_int_cst (lhs_type_type, offset + i));
16156 elts.safe_push (build_int_cst (lhs_type_type, offset + n_elts + i));
16159 tree permute = elts.build ();
16161 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
16162 gimple_set_location (g, gimple_location (stmt));
16163 gsi_replace (gsi, g, true);
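/* Usage sketch (illustrative only): for 4-element vectors the permute
   vector built above interleaves matching elements of the two inputs,
   e.g.:  */
#if 0
#include <altivec.h>

vector int
merge_example (vector int a, vector int b)
{
  /* { a[0], b[0], a[1], b[1] } in big-endian element order.  */
  return vec_mergeh (a, b);
}
#endif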
16166 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16167 a constant, use rs6000_fold_builtin.) */
16169 bool
16170 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16172 gimple *stmt = gsi_stmt (*gsi);
16173 tree fndecl = gimple_call_fndecl (stmt);
16174 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16175 enum rs6000_builtins fn_code
16176 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16177 tree arg0, arg1, lhs, temp;
16178 gimple *g;
16180 size_t uns_fncode = (size_t) fn_code;
16181 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16182 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16183 const char *fn_name2 = (icode != CODE_FOR_nothing)
16184 ? get_insn_name ((int) icode)
16185 : "nothing";
16187 if (TARGET_DEBUG_BUILTIN)
16188 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16189 fn_code, fn_name1, fn_name2);
16191 if (!rs6000_fold_gimple)
16192 return false;
16194 /* Prevent gimple folding for code that does not have a LHS, unless it is
16195 allowed per the rs6000_builtin_valid_without_lhs helper function. */
16196 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
16197 return false;
16199 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
16200 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
16201 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
16202 if (!func_valid_p)
16203 return false;
16205 switch (fn_code)
16207 /* Flavors of vec_add. We deliberately don't expand
16208 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16209 TImode, resulting in much poorer code generation. */
16210 case ALTIVEC_BUILTIN_VADDUBM:
16211 case ALTIVEC_BUILTIN_VADDUHM:
16212 case ALTIVEC_BUILTIN_VADDUWM:
16213 case P8V_BUILTIN_VADDUDM:
16214 case ALTIVEC_BUILTIN_VADDFP:
16215 case VSX_BUILTIN_XVADDDP:
16216 arg0 = gimple_call_arg (stmt, 0);
16217 arg1 = gimple_call_arg (stmt, 1);
16218 lhs = gimple_call_lhs (stmt);
16219 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16220 gimple_set_location (g, gimple_location (stmt));
16221 gsi_replace (gsi, g, true);
16222 return true;
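/* For illustration (a sketch, not from the original source): with the
   folding above,

       vector int add_example (vector int a, vector int b)
       { return vec_add (a, b); }

   is rewritten as a plain PLUS_EXPR, i.e. "lhs = a + b" in GIMPLE.  */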
16223 /* Flavors of vec_sub. We deliberately don't expand
16224 P8V_BUILTIN_VSUBUQM. */
16225 case ALTIVEC_BUILTIN_VSUBUBM:
16226 case ALTIVEC_BUILTIN_VSUBUHM:
16227 case ALTIVEC_BUILTIN_VSUBUWM:
16228 case P8V_BUILTIN_VSUBUDM:
16229 case ALTIVEC_BUILTIN_VSUBFP:
16230 case VSX_BUILTIN_XVSUBDP:
16231 arg0 = gimple_call_arg (stmt, 0);
16232 arg1 = gimple_call_arg (stmt, 1);
16233 lhs = gimple_call_lhs (stmt);
16234 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16235 gimple_set_location (g, gimple_location (stmt));
16236 gsi_replace (gsi, g, true);
16237 return true;
16238 case VSX_BUILTIN_XVMULSP:
16239 case VSX_BUILTIN_XVMULDP:
16240 arg0 = gimple_call_arg (stmt, 0);
16241 arg1 = gimple_call_arg (stmt, 1);
16242 lhs = gimple_call_lhs (stmt);
16243 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16244 gimple_set_location (g, gimple_location (stmt));
16245 gsi_replace (gsi, g, true);
16246 return true;
16247 /* Even element flavors of vec_mul (signed). */
16248 case ALTIVEC_BUILTIN_VMULESB:
16249 case ALTIVEC_BUILTIN_VMULESH:
16250 case ALTIVEC_BUILTIN_VMULESW:
16251 /* Even element flavors of vec_mul (unsigned). */
16252 case ALTIVEC_BUILTIN_VMULEUB:
16253 case ALTIVEC_BUILTIN_VMULEUH:
16254 case ALTIVEC_BUILTIN_VMULEUW:
16255 arg0 = gimple_call_arg (stmt, 0);
16256 arg1 = gimple_call_arg (stmt, 1);
16257 lhs = gimple_call_lhs (stmt);
16258 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16259 gimple_set_location (g, gimple_location (stmt));
16260 gsi_replace (gsi, g, true);
16261 return true;
16262 /* Odd element flavors of vec_mul (signed). */
16263 case ALTIVEC_BUILTIN_VMULOSB:
16264 case ALTIVEC_BUILTIN_VMULOSH:
16265 case ALTIVEC_BUILTIN_VMULOSW:
16266 /* Odd element flavors of vec_mul (unsigned). */
16267 case ALTIVEC_BUILTIN_VMULOUB:
16268 case ALTIVEC_BUILTIN_VMULOUH:
16269 case ALTIVEC_BUILTIN_VMULOUW:
16270 arg0 = gimple_call_arg (stmt, 0);
16271 arg1 = gimple_call_arg (stmt, 1);
16272 lhs = gimple_call_lhs (stmt);
16273 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16274 gimple_set_location (g, gimple_location (stmt));
16275 gsi_replace (gsi, g, true);
16276 return true;
16277 /* Flavors of vec_div (Integer). */
16278 case VSX_BUILTIN_DIV_V2DI:
16279 case VSX_BUILTIN_UDIV_V2DI:
16280 arg0 = gimple_call_arg (stmt, 0);
16281 arg1 = gimple_call_arg (stmt, 1);
16282 lhs = gimple_call_lhs (stmt);
16283 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16284 gimple_set_location (g, gimple_location (stmt));
16285 gsi_replace (gsi, g, true);
16286 return true;
16287 /* Flavors of vec_div (Float). */
16288 case VSX_BUILTIN_XVDIVSP:
16289 case VSX_BUILTIN_XVDIVDP:
16290 arg0 = gimple_call_arg (stmt, 0);
16291 arg1 = gimple_call_arg (stmt, 1);
16292 lhs = gimple_call_lhs (stmt);
16293 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16294 gimple_set_location (g, gimple_location (stmt));
16295 gsi_replace (gsi, g, true);
16296 return true;
16297 /* Flavors of vec_and. */
16298 case ALTIVEC_BUILTIN_VAND:
16299 arg0 = gimple_call_arg (stmt, 0);
16300 arg1 = gimple_call_arg (stmt, 1);
16301 lhs = gimple_call_lhs (stmt);
16302 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16303 gimple_set_location (g, gimple_location (stmt));
16304 gsi_replace (gsi, g, true);
16305 return true;
16306 /* Flavors of vec_andc. */
16307 case ALTIVEC_BUILTIN_VANDC:
16308 arg0 = gimple_call_arg (stmt, 0);
16309 arg1 = gimple_call_arg (stmt, 1);
16310 lhs = gimple_call_lhs (stmt);
16311 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16312 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16313 gimple_set_location (g, gimple_location (stmt));
16314 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16315 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16316 gimple_set_location (g, gimple_location (stmt));
16317 gsi_replace (gsi, g, true);
16318 return true;
16319 /* Flavors of vec_nand. */
16320 case P8V_BUILTIN_VEC_NAND:
16321 case P8V_BUILTIN_NAND_V16QI:
16322 case P8V_BUILTIN_NAND_V8HI:
16323 case P8V_BUILTIN_NAND_V4SI:
16324 case P8V_BUILTIN_NAND_V4SF:
16325 case P8V_BUILTIN_NAND_V2DF:
16326 case P8V_BUILTIN_NAND_V2DI:
16327 arg0 = gimple_call_arg (stmt, 0);
16328 arg1 = gimple_call_arg (stmt, 1);
16329 lhs = gimple_call_lhs (stmt);
16330 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16331 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16332 gimple_set_location (g, gimple_location (stmt));
16333 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16334 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16335 gimple_set_location (g, gimple_location (stmt));
16336 gsi_replace (gsi, g, true);
16337 return true;
16338 /* Flavors of vec_or. */
16339 case ALTIVEC_BUILTIN_VOR:
16340 arg0 = gimple_call_arg (stmt, 0);
16341 arg1 = gimple_call_arg (stmt, 1);
16342 lhs = gimple_call_lhs (stmt);
16343 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16344 gimple_set_location (g, gimple_location (stmt));
16345 gsi_replace (gsi, g, true);
16346 return true;
16347 /* Flavors of vec_orc. */
16348 case P8V_BUILTIN_ORC_V16QI:
16349 case P8V_BUILTIN_ORC_V8HI:
16350 case P8V_BUILTIN_ORC_V4SI:
16351 case P8V_BUILTIN_ORC_V4SF:
16352 case P8V_BUILTIN_ORC_V2DF:
16353 case P8V_BUILTIN_ORC_V2DI:
16354 arg0 = gimple_call_arg (stmt, 0);
16355 arg1 = gimple_call_arg (stmt, 1);
16356 lhs = gimple_call_lhs (stmt);
16357 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16358 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16359 gimple_set_location (g, gimple_location (stmt));
16360 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16361 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16362 gimple_set_location (g, gimple_location (stmt));
16363 gsi_replace (gsi, g, true);
16364 return true;
16365 /* Flavors of vec_xor. */
16366 case ALTIVEC_BUILTIN_VXOR:
16367 arg0 = gimple_call_arg (stmt, 0);
16368 arg1 = gimple_call_arg (stmt, 1);
16369 lhs = gimple_call_lhs (stmt);
16370 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16371 gimple_set_location (g, gimple_location (stmt));
16372 gsi_replace (gsi, g, true);
16373 return true;
16374 /* Flavors of vec_nor. */
16375 case ALTIVEC_BUILTIN_VNOR:
16376 arg0 = gimple_call_arg (stmt, 0);
16377 arg1 = gimple_call_arg (stmt, 1);
16378 lhs = gimple_call_lhs (stmt);
16379 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16380 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16381 gimple_set_location (g, gimple_location (stmt));
16382 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16383 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16384 gimple_set_location (g, gimple_location (stmt));
16385 gsi_replace (gsi, g, true);
16386 return true;
16387 /* Flavors of vec_abs. */
16388 case ALTIVEC_BUILTIN_ABS_V16QI:
16389 case ALTIVEC_BUILTIN_ABS_V8HI:
16390 case ALTIVEC_BUILTIN_ABS_V4SI:
16391 case ALTIVEC_BUILTIN_ABS_V4SF:
16392 case P8V_BUILTIN_ABS_V2DI:
16393 case VSX_BUILTIN_XVABSDP:
16394 arg0 = gimple_call_arg (stmt, 0);
16395 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16396 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16397 return false;
16398 lhs = gimple_call_lhs (stmt);
16399 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16400 gimple_set_location (g, gimple_location (stmt));
16401 gsi_replace (gsi, g, true);
16402 return true;
16403 /* Flavors of vec_min. */
16404 case VSX_BUILTIN_XVMINDP:
16405 case P8V_BUILTIN_VMINSD:
16406 case P8V_BUILTIN_VMINUD:
16407 case ALTIVEC_BUILTIN_VMINSB:
16408 case ALTIVEC_BUILTIN_VMINSH:
16409 case ALTIVEC_BUILTIN_VMINSW:
16410 case ALTIVEC_BUILTIN_VMINUB:
16411 case ALTIVEC_BUILTIN_VMINUH:
16412 case ALTIVEC_BUILTIN_VMINUW:
16413 case ALTIVEC_BUILTIN_VMINFP:
16414 arg0 = gimple_call_arg (stmt, 0);
16415 arg1 = gimple_call_arg (stmt, 1);
16416 lhs = gimple_call_lhs (stmt);
16417 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16418 gimple_set_location (g, gimple_location (stmt));
16419 gsi_replace (gsi, g, true);
16420 return true;
16421 /* Flavors of vec_max. */
16422 case VSX_BUILTIN_XVMAXDP:
16423 case P8V_BUILTIN_VMAXSD:
16424 case P8V_BUILTIN_VMAXUD:
16425 case ALTIVEC_BUILTIN_VMAXSB:
16426 case ALTIVEC_BUILTIN_VMAXSH:
16427 case ALTIVEC_BUILTIN_VMAXSW:
16428 case ALTIVEC_BUILTIN_VMAXUB:
16429 case ALTIVEC_BUILTIN_VMAXUH:
16430 case ALTIVEC_BUILTIN_VMAXUW:
16431 case ALTIVEC_BUILTIN_VMAXFP:
16432 arg0 = gimple_call_arg (stmt, 0);
16433 arg1 = gimple_call_arg (stmt, 1);
16434 lhs = gimple_call_lhs (stmt);
16435 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16436 gimple_set_location (g, gimple_location (stmt));
16437 gsi_replace (gsi, g, true);
16438 return true;
16439 /* Flavors of vec_eqv. */
16440 case P8V_BUILTIN_EQV_V16QI:
16441 case P8V_BUILTIN_EQV_V8HI:
16442 case P8V_BUILTIN_EQV_V4SI:
16443 case P8V_BUILTIN_EQV_V4SF:
16444 case P8V_BUILTIN_EQV_V2DF:
16445 case P8V_BUILTIN_EQV_V2DI:
16446 arg0 = gimple_call_arg (stmt, 0);
16447 arg1 = gimple_call_arg (stmt, 1);
16448 lhs = gimple_call_lhs (stmt);
16449 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16450 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16451 gimple_set_location (g, gimple_location (stmt));
16452 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16453 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16454 gimple_set_location (g, gimple_location (stmt));
16455 gsi_replace (gsi, g, true);
16456 return true;
16457 /* Flavors of vec_rotate_left. */
16458 case ALTIVEC_BUILTIN_VRLB:
16459 case ALTIVEC_BUILTIN_VRLH:
16460 case ALTIVEC_BUILTIN_VRLW:
16461 case P8V_BUILTIN_VRLD:
16462 arg0 = gimple_call_arg (stmt, 0);
16463 arg1 = gimple_call_arg (stmt, 1);
16464 lhs = gimple_call_lhs (stmt);
16465 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16466 gimple_set_location (g, gimple_location (stmt));
16467 gsi_replace (gsi, g, true);
16468 return true;
16469 /* Flavors of vector shift right algebraic.
16470 vec_sra{b,h,w} -> vsra{b,h,w}. */
16471 case ALTIVEC_BUILTIN_VSRAB:
16472 case ALTIVEC_BUILTIN_VSRAH:
16473 case ALTIVEC_BUILTIN_VSRAW:
16474 case P8V_BUILTIN_VSRAD:
16475 arg0 = gimple_call_arg (stmt, 0);
16476 arg1 = gimple_call_arg (stmt, 1);
16477 lhs = gimple_call_lhs (stmt);
16478 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16479 gimple_set_location (g, gimple_location (stmt));
16480 gsi_replace (gsi, g, true);
16481 return true;
16482 /* Flavors of vector shift left.
16483 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16484 case ALTIVEC_BUILTIN_VSLB:
16485 case ALTIVEC_BUILTIN_VSLH:
16486 case ALTIVEC_BUILTIN_VSLW:
16487 case P8V_BUILTIN_VSLD:
16488 arg0 = gimple_call_arg (stmt, 0);
16489 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16490 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16491 return false;
16492 arg1 = gimple_call_arg (stmt, 1);
16493 lhs = gimple_call_lhs (stmt);
16494 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16495 gimple_set_location (g, gimple_location (stmt));
16496 gsi_replace (gsi, g, true);
16497 return true;
16498 /* Flavors of vector shift right. */
16499 case ALTIVEC_BUILTIN_VSRB:
16500 case ALTIVEC_BUILTIN_VSRH:
16501 case ALTIVEC_BUILTIN_VSRW:
16502 case P8V_BUILTIN_VSRD:
16503 {
16504 arg0 = gimple_call_arg (stmt, 0);
16505 arg1 = gimple_call_arg (stmt, 1);
16506 lhs = gimple_call_lhs (stmt);
16507 gimple_seq stmts = NULL;
16508 /* Convert arg0 to unsigned so the shift is logical (zero-fill), as vsr requires. */
16509 tree arg0_unsigned
16510 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16511 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16512 tree res
16513 = gimple_build (&stmts, RSHIFT_EXPR,
16514 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16515 /* Convert result back to the lhs type. */
16516 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16517 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16518 update_call_from_tree (gsi, res);
16519 return true;
16520 }
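/* A sketch of the effect (illustrative, not from the original source):
   per element, the fold above behaves roughly like
     lhs = (T) (((unsigned T) a) >> b);
   i.e. a logical shift independent of the signedness of the operands.  */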
16521 /* Vector loads. */
16522 case ALTIVEC_BUILTIN_LVX_V16QI:
16523 case ALTIVEC_BUILTIN_LVX_V8HI:
16524 case ALTIVEC_BUILTIN_LVX_V4SI:
16525 case ALTIVEC_BUILTIN_LVX_V4SF:
16526 case ALTIVEC_BUILTIN_LVX_V2DI:
16527 case ALTIVEC_BUILTIN_LVX_V2DF:
16528 {
16529 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
16530 arg1 = gimple_call_arg (stmt, 1); /* Address. */
16531 /* Do not fold for -maltivec=be on LE targets. */
16532 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16533 return false;
16534 lhs = gimple_call_lhs (stmt);
16535 location_t loc = gimple_location (stmt);
16536 /* Since arg1 may be cast to a different type, just use ptr_type_node
16537 here instead of trying to enforce TBAA on pointer types. */
16538 tree arg1_type = ptr_type_node;
16539 tree lhs_type = TREE_TYPE (lhs);
16540 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16541 the tree using the value from arg0. The resulting type will match
16542 the type of arg1. */
16543 gimple_seq stmts = NULL;
16544 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16545 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16546 arg1_type, arg1, temp_offset);
16547 /* Mask off any lower bits from the address; lvx ignores the low-order
16548 four bits of the EA, so the access is 16-byte aligned. */
16548 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16549 arg1_type, temp_addr,
16550 build_int_cst (arg1_type, -16));
16551 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16552 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16553 take an offset, but since we've already incorporated the offset
16554 above, here we just pass in a zero. */
16555 gimple *g
16556 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
16557 build_int_cst (arg1_type, 0)));
16558 gimple_set_location (g, loc);
16559 gsi_replace (gsi, g, true);
16560 return true;
16561 }
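/* For illustration (mirroring the stvx comment below; the SSA names are
   made up): the folded load is roughly
     _1 = (sizetype) arg0;
     _2 = arg1 p+ _1;
     _3 = _2 & -16;
     lhs = MEM[(vector T *) _3];  */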
16562 /* Vector stores. */
16563 case ALTIVEC_BUILTIN_STVX_V16QI:
16564 case ALTIVEC_BUILTIN_STVX_V8HI:
16565 case ALTIVEC_BUILTIN_STVX_V4SI:
16566 case ALTIVEC_BUILTIN_STVX_V4SF:
16567 case ALTIVEC_BUILTIN_STVX_V2DI:
16568 case ALTIVEC_BUILTIN_STVX_V2DF:
16569 {
16570 /* Do not fold for -maltivec=be on LE targets. */
16571 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16572 return false;
16573 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16574 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16575 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16576 location_t loc = gimple_location (stmt);
16577 tree arg0_type = TREE_TYPE (arg0);
16578 /* Use ptr_type_node (no TBAA) for the arg2_type.
16579 FIXME: (Richard) "A proper fix would be to transition this type as
16580 seen from the frontend to GIMPLE, for example in a similar way we
16581 do for MEM_REFs by piggy-backing that on an extra argument, a
16582 constant zero pointer of the alias pointer type to use (which would
16583 also serve as a type indicator of the store itself). I'd use a
16584 target specific internal function for this (not sure if we can have
16585 those target specific, but I guess if it's folded away then that's
16586 fine) and get away with the overload set." */
16587 tree arg2_type = ptr_type_node;
16588 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16589 the tree using the value from arg1. The resulting type will match
16590 the type of arg2. */
16591 gimple_seq stmts = NULL;
16592 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16593 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16594 arg2_type, arg2, temp_offset);
16595 /* Mask off any lower bits from the address; like lvx, stvx ignores
16596 the low-order four bits of the EA. */
16596 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16597 arg2_type, temp_addr,
16598 build_int_cst (arg2_type, -16));
16599 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16600 /* The desired gimple result should be similar to:
16601 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
16602 gimple *g
16603 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
16604 build_int_cst (arg2_type, 0)), arg0);
16605 gimple_set_location (g, loc);
16606 gsi_replace (gsi, g, true);
16607 return true;
16608 }
16610 /* Vector Fused multiply-add (fma). */
16611 case ALTIVEC_BUILTIN_VMADDFP:
16612 case VSX_BUILTIN_XVMADDDP:
16613 case ALTIVEC_BUILTIN_VMLADDUHM:
16614 {
16615 arg0 = gimple_call_arg (stmt, 0);
16616 arg1 = gimple_call_arg (stmt, 1);
16617 tree arg2 = gimple_call_arg (stmt, 2);
16618 lhs = gimple_call_lhs (stmt);
16619 gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
16620 gimple_set_location (g, gimple_location (stmt));
16621 gsi_replace (gsi, g, true);
16622 return true;
16623 }
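/* Note (not from the original source): FMA_EXPR computes a * b + c; for
   the floating-point variants this is the fused multiply-add, while for
   vmladduhm it is the modular halfword multiply-low-and-add.  */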
16625 /* Vector compares: EQ, NE, GE, GT, LE. */
16626 case ALTIVEC_BUILTIN_VCMPEQUB:
16627 case ALTIVEC_BUILTIN_VCMPEQUH:
16628 case ALTIVEC_BUILTIN_VCMPEQUW:
16629 case P8V_BUILTIN_VCMPEQUD:
16630 fold_compare_helper (gsi, EQ_EXPR, stmt);
16631 return true;
16633 case P9V_BUILTIN_CMPNEB:
16634 case P9V_BUILTIN_CMPNEH:
16635 case P9V_BUILTIN_CMPNEW:
16636 fold_compare_helper (gsi, NE_EXPR, stmt);
16637 return true;
16639 case VSX_BUILTIN_CMPGE_16QI:
16640 case VSX_BUILTIN_CMPGE_U16QI:
16641 case VSX_BUILTIN_CMPGE_8HI:
16642 case VSX_BUILTIN_CMPGE_U8HI:
16643 case VSX_BUILTIN_CMPGE_4SI:
16644 case VSX_BUILTIN_CMPGE_U4SI:
16645 case VSX_BUILTIN_CMPGE_2DI:
16646 case VSX_BUILTIN_CMPGE_U2DI:
16647 fold_compare_helper (gsi, GE_EXPR, stmt);
16648 return true;
16650 case ALTIVEC_BUILTIN_VCMPGTSB:
16651 case ALTIVEC_BUILTIN_VCMPGTUB:
16652 case ALTIVEC_BUILTIN_VCMPGTSH:
16653 case ALTIVEC_BUILTIN_VCMPGTUH:
16654 case ALTIVEC_BUILTIN_VCMPGTSW:
16655 case ALTIVEC_BUILTIN_VCMPGTUW:
16656 case P8V_BUILTIN_VCMPGTUD:
16657 case P8V_BUILTIN_VCMPGTSD:
16658 fold_compare_helper (gsi, GT_EXPR, stmt);
16659 return true;
16661 case VSX_BUILTIN_CMPLE_16QI:
16662 case VSX_BUILTIN_CMPLE_U16QI:
16663 case VSX_BUILTIN_CMPLE_8HI:
16664 case VSX_BUILTIN_CMPLE_U8HI:
16665 case VSX_BUILTIN_CMPLE_4SI:
16666 case VSX_BUILTIN_CMPLE_U4SI:
16667 case VSX_BUILTIN_CMPLE_2DI:
16668 case VSX_BUILTIN_CMPLE_U2DI:
16669 fold_compare_helper (gsi, LE_EXPR, stmt);
16670 return true;
16672 /* Flavors of vec_splat_[us]{8,16,32}. */
16673 case ALTIVEC_BUILTIN_VSPLTISB:
16674 case ALTIVEC_BUILTIN_VSPLTISH:
16675 case ALTIVEC_BUILTIN_VSPLTISW:
16676 {
16677 arg0 = gimple_call_arg (stmt, 0);
16678 lhs = gimple_call_lhs (stmt);
16679 /* Only fold vec_splat_*() if arg0 is constant. */
16680 if (TREE_CODE (arg0) != INTEGER_CST)
16681 return false;
16682 gimple_seq stmts = NULL;
16683 location_t loc = gimple_location (stmt);
16684 tree splat_value = gimple_convert (&stmts, loc,
16685 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16686 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16687 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16688 g = gimple_build_assign (lhs, splat_tree);
16689 gimple_set_location (g, gimple_location (stmt));
16690 gsi_replace (gsi, g, true);
16691 return true;
16692 }
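/* Example (illustrative): vec_splat_s32 (5) becomes the constant vector
   {5, 5, 5, 5} via build_vector_from_val.  */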
16694 /* vec_mergel (integrals). */
16695 case ALTIVEC_BUILTIN_VMRGLH:
16696 case ALTIVEC_BUILTIN_VMRGLW:
16697 case VSX_BUILTIN_XXMRGLW_4SI:
16698 case ALTIVEC_BUILTIN_VMRGLB:
16699 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16700 /* Do not fold for -maltivec=be on LE targets. */
16701 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16702 return false;
16703 fold_mergehl_helper (gsi, stmt, 1);
16704 return true;
16705 /* vec_mergeh (integrals). */
16706 case ALTIVEC_BUILTIN_VMRGHH:
16707 case ALTIVEC_BUILTIN_VMRGHW:
16708 case VSX_BUILTIN_XXMRGHW_4SI:
16709 case ALTIVEC_BUILTIN_VMRGHB:
16710 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16711 /* Do not fold for -maltivec=be on LE targets. */
16712 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16713 return false;
16714 fold_mergehl_helper (gsi, stmt, 0);
16715 return true;
16716 default:
16717 if (TARGET_DEBUG_BUILTIN)
16718 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16719 fn_code, fn_name1, fn_name2);
16720 break;
16721 }
16723 return false;
16724 }
16726 /* Expand an expression EXP that calls a built-in function,
16727 with result going to TARGET if that's convenient
16728 (and in mode MODE if that's convenient).
16729 SUBTARGET may be used as the target for computing one of EXP's operands.
16730 IGNORE is nonzero if the value is to be ignored. */
16732 static rtx
16733 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16734 machine_mode mode ATTRIBUTE_UNUSED,
16735 int ignore ATTRIBUTE_UNUSED)
16736 {
16737 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16738 enum rs6000_builtins fcode
16739 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16740 size_t uns_fcode = (size_t) fcode;
16741 const struct builtin_description *d;
16742 size_t i;
16743 rtx ret;
16744 bool success;
16745 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16746 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16747 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16749 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16750 floating point type, depending on whether long double is the IBM extended
16751 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16752 we only define one variant of the built-in function, and switch the code
16753 when defining it, rather than defining two built-ins and using the
16754 overload table in rs6000-c.c to switch between the two. If we don't have
16755 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16756 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16757 #ifdef HAVE_AS_POWER9
16758 if (FLOAT128_IEEE_P (TFmode))
16759 switch (icode)
16760 {
16761 default:
16762 break;
16764 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16765 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16766 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16767 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16768 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16769 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16770 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16771 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16772 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16773 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16774 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16775 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16776 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16777 }
16778 #endif
16780 if (TARGET_DEBUG_BUILTIN)
16781 {
16782 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16783 const char *name2 = (icode != CODE_FOR_nothing)
16784 ? get_insn_name ((int) icode)
16785 : "nothing";
16786 const char *name3;
16788 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16789 {
16790 default: name3 = "unknown"; break;
16791 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16792 case RS6000_BTC_UNARY: name3 = "unary"; break;
16793 case RS6000_BTC_BINARY: name3 = "binary"; break;
16794 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16795 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16796 case RS6000_BTC_ABS: name3 = "abs"; break;
16797 case RS6000_BTC_DST: name3 = "dst"; break;
16798 }
16801 fprintf (stderr,
16802 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16803 (name1) ? name1 : "---", fcode,
16804 (name2) ? name2 : "---", (int) icode,
16805 name3,
16806 func_valid_p ? "" : ", not valid");
16807 }
16809 if (!func_valid_p)
16810 {
16811 rs6000_invalid_builtin (fcode);
16813 /* Given it is invalid, just generate a normal call. */
16814 return expand_call (exp, target, ignore);
16815 }
16817 switch (fcode)
16818 {
16819 case RS6000_BUILTIN_RECIP:
16820 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16822 case RS6000_BUILTIN_RECIPF:
16823 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16825 case RS6000_BUILTIN_RSQRTF:
16826 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16828 case RS6000_BUILTIN_RSQRT:
16829 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16831 case POWER7_BUILTIN_BPERMD:
16832 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16833 ? CODE_FOR_bpermd_di
16834 : CODE_FOR_bpermd_si), exp, target);
16836 case RS6000_BUILTIN_GET_TB:
16837 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16838 target);
16840 case RS6000_BUILTIN_MFTB:
16841 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16842 ? CODE_FOR_rs6000_mftb_di
16843 : CODE_FOR_rs6000_mftb_si),
16844 target);
16846 case RS6000_BUILTIN_MFFS:
16847 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16849 case RS6000_BUILTIN_MTFSF:
16850 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16852 case RS6000_BUILTIN_CPU_INIT:
16853 case RS6000_BUILTIN_CPU_IS:
16854 case RS6000_BUILTIN_CPU_SUPPORTS:
16855 return cpu_expand_builtin (fcode, exp, target);
16857 case MISC_BUILTIN_SPEC_BARRIER:
16858 {
16859 emit_insn (gen_rs6000_speculation_barrier ());
16860 return NULL_RTX;
16861 }
16863 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16864 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16865 {
16866 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16867 : (int) CODE_FOR_altivec_lvsl_direct);
16868 machine_mode tmode = insn_data[icode2].operand[0].mode;
16869 machine_mode mode = insn_data[icode2].operand[1].mode;
16870 tree arg;
16871 rtx op, addr, pat;
16873 gcc_assert (TARGET_ALTIVEC);
16875 arg = CALL_EXPR_ARG (exp, 0);
16876 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16877 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16878 addr = memory_address (mode, op);
16879 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16880 op = addr;
16881 else
16882 {
16883 /* For the load case we need to negate the address. */
16884 op = gen_reg_rtx (GET_MODE (addr));
16885 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16886 }
16887 op = gen_rtx_MEM (mode, op);
16889 if (target == 0
16890 || GET_MODE (target) != tmode
16891 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16892 target = gen_reg_rtx (tmode);
16894 pat = GEN_FCN (icode2) (target, op);
16895 if (!pat)
16896 return 0;
16897 emit_insn (pat);
16899 return target;
16900 }
16902 case ALTIVEC_BUILTIN_VCFUX:
16903 case ALTIVEC_BUILTIN_VCFSX:
16904 case ALTIVEC_BUILTIN_VCTUXS:
16905 case ALTIVEC_BUILTIN_VCTSXS:
16906 /* The one-argument form defaults the second operand to zero. FIXME:
16907 There's got to be a nicer way than constructing a new CALL_EXPR. */
16908 if (call_expr_nargs (exp) == 1)
16909 {
16910 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16911 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16912 }
16913 break;
16915 default:
16916 break;
16917 }
16919 if (TARGET_ALTIVEC)
16920 {
16921 ret = altivec_expand_builtin (exp, target, &success);
16923 if (success)
16924 return ret;
16925 }
16926 if (TARGET_PAIRED_FLOAT)
16927 {
16928 ret = paired_expand_builtin (exp, target, &success);
16930 if (success)
16931 return ret;
16932 }
16933 if (TARGET_HTM)
16934 {
16935 ret = htm_expand_builtin (exp, target, &success);
16937 if (success)
16938 return ret;
16939 }
16941 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16942 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16943 gcc_assert (attr == RS6000_BTC_UNARY
16944 || attr == RS6000_BTC_BINARY
16945 || attr == RS6000_BTC_TERNARY
16946 || attr == RS6000_BTC_SPECIAL);
16948 /* Handle simple unary operations. */
16949 d = bdesc_1arg;
16950 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16951 if (d->code == fcode)
16952 return rs6000_expand_unop_builtin (icode, exp, target);
16954 /* Handle simple binary operations. */
16955 d = bdesc_2arg;
16956 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16957 if (d->code == fcode)
16958 return rs6000_expand_binop_builtin (icode, exp, target);
16960 /* Handle simple ternary operations. */
16961 d = bdesc_3arg;
16962 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16963 if (d->code == fcode)
16964 return rs6000_expand_ternop_builtin (icode, exp, target);
16966 /* Handle simple no-argument operations. */
16967 d = bdesc_0arg;
16968 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16969 if (d->code == fcode)
16970 return rs6000_expand_zeroop_builtin (icode, target);
16972 gcc_unreachable ();
16973 }
16975 /* Create a builtin vector type with a name, taking care not to give
16976 the canonical type a name. */
16978 static tree
16979 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16980 {
16981 tree result = build_vector_type (elt_type, num_elts);
16983 /* Copy so we don't give the canonical type a name. */
16984 result = build_variant_type_copy (result);
16986 add_builtin_type (name, result);
16988 return result;
16989 }
16991 static void
16992 rs6000_init_builtins (void)
16993 {
16994 tree tdecl;
16995 tree ftype;
16996 machine_mode mode;
16998 if (TARGET_DEBUG_BUILTIN)
16999 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
17000 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
17001 (TARGET_ALTIVEC) ? ", altivec" : "",
17002 (TARGET_VSX) ? ", vsx" : "");
17004 V2SI_type_node = build_vector_type (intSI_type_node, 2);
17005 V2SF_type_node = build_vector_type (float_type_node, 2);
17006 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
17007 : "__vector long long",
17008 intDI_type_node, 2);
17009 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
17010 V4SI_type_node = rs6000_vector_type ("__vector signed int",
17011 intSI_type_node, 4);
17012 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
17013 V8HI_type_node = rs6000_vector_type ("__vector signed short",
17014 intHI_type_node, 8);
17015 V16QI_type_node = rs6000_vector_type ("__vector signed char",
17016 intQI_type_node, 16);
17018 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
17019 unsigned_intQI_type_node, 16);
17020 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
17021 unsigned_intHI_type_node, 8);
17022 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
17023 unsigned_intSI_type_node, 4);
17024 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17025 ? "__vector unsigned long"
17026 : "__vector unsigned long long",
17027 unsigned_intDI_type_node, 2);
17029 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
17030 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
17031 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
17032 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
17034 const_str_type_node
17035 = build_pointer_type (build_qualified_type (char_type_node,
17036 TYPE_QUAL_CONST));
17038 /* We use V1TI mode as a special container to hold __int128_t items that
17039 must live in VSX registers. */
17040 if (intTI_type_node)
17041 {
17042 V1TI_type_node = rs6000_vector_type ("__vector __int128",
17043 intTI_type_node, 1);
17044 unsigned_V1TI_type_node
17045 = rs6000_vector_type ("__vector unsigned __int128",
17046 unsigned_intTI_type_node, 1);
17047 }
17049 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
17050 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
17051 'vector unsigned short'. */
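/* For example (illustrative, not from the original source): in C++ the
   result of vec_cmpeq on two vector unsigned char values has type
   vector bool char, and overload resolution must be able to distinguish
   it from vector unsigned char.  */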
17053 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
17054 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17055 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
17056 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
17057 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17059 long_integer_type_internal_node = long_integer_type_node;
17060 long_unsigned_type_internal_node = long_unsigned_type_node;
17061 long_long_integer_type_internal_node = long_long_integer_type_node;
17062 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
17063 intQI_type_internal_node = intQI_type_node;
17064 uintQI_type_internal_node = unsigned_intQI_type_node;
17065 intHI_type_internal_node = intHI_type_node;
17066 uintHI_type_internal_node = unsigned_intHI_type_node;
17067 intSI_type_internal_node = intSI_type_node;
17068 uintSI_type_internal_node = unsigned_intSI_type_node;
17069 intDI_type_internal_node = intDI_type_node;
17070 uintDI_type_internal_node = unsigned_intDI_type_node;
17071 intTI_type_internal_node = intTI_type_node;
17072 uintTI_type_internal_node = unsigned_intTI_type_node;
17073 float_type_internal_node = float_type_node;
17074 double_type_internal_node = double_type_node;
17075 long_double_type_internal_node = long_double_type_node;
17076 dfloat64_type_internal_node = dfloat64_type_node;
17077 dfloat128_type_internal_node = dfloat128_type_node;
17078 void_type_internal_node = void_type_node;
17080 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
17081 IFmode is the IBM extended 128-bit format that is a pair of doubles.
17082 TFmode will be either IEEE 128-bit floating point or the IBM double-double
17083 format that uses a pair of doubles, depending on the switches and
17084 defaults.
17086 If we don't have support for either 128-bit IBM double-double or IEEE
17087 128-bit floating point, we need to make sure the type is non-zero, or
17088 else the self-test fails during bootstrap.
17090 We don't register a built-in type for __ibm128 if the type is the same as
17091 long double. Instead rs6000_cpu_cpp_builtins adds a #define mapping
17092 __ibm128 to long double.
17094 For IEEE 128-bit floating point, always create the type __ieee128. If the
17095 user used -mfloat128, rs6000-c.c will create a define from __float128 to
17096 __ieee128. */
17097 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17098 {
17099 ibm128_float_type_node = make_node (REAL_TYPE);
17100 TYPE_PRECISION (ibm128_float_type_node) = 128;
17101 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17102 layout_type (ibm128_float_type_node);
17104 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17105 "__ibm128");
17107 else
17108 ibm128_float_type_node = long_double_type_node;
17110 if (TARGET_FLOAT128_TYPE)
17111 {
17112 ieee128_float_type_node = float128_type_node;
17113 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17114 "__ieee128");
17117 else
17118 ieee128_float_type_node = long_double_type_node;
17120 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17121 tree type node. */
17122 builtin_mode_to_type[QImode][0] = integer_type_node;
17123 builtin_mode_to_type[HImode][0] = integer_type_node;
17124 builtin_mode_to_type[SImode][0] = intSI_type_node;
17125 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17126 builtin_mode_to_type[DImode][0] = intDI_type_node;
17127 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17128 builtin_mode_to_type[TImode][0] = intTI_type_node;
17129 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17130 builtin_mode_to_type[SFmode][0] = float_type_node;
17131 builtin_mode_to_type[DFmode][0] = double_type_node;
17132 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17133 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17134 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17135 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17136 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17137 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17138 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17139 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17140 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17141 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17142 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17143 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17144 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17145 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17146 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17147 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17148 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17149 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17150 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
17152 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17153 TYPE_NAME (bool_char_type_node) = tdecl;
17155 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17156 TYPE_NAME (bool_short_type_node) = tdecl;
17158 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17159 TYPE_NAME (bool_int_type_node) = tdecl;
17161 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17162 TYPE_NAME (pixel_type_node) = tdecl;
17164 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17165 bool_char_type_node, 16);
17166 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17167 bool_short_type_node, 8);
17168 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17169 bool_int_type_node, 4);
17170 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17171 ? "__vector __bool long"
17172 : "__vector __bool long long",
17173 bool_long_type_node, 2);
17174 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17175 pixel_type_node, 8);
17177 /* Paired builtins are only available if you build a compiler with the
17178 appropriate options, so create those builtins only when the option is
17179 enabled. Create Altivec and VSX builtins on machines with at
17180 least the general purpose extensions (970 and newer) to allow the use of
17181 the target attribute. */
17182 if (TARGET_PAIRED_FLOAT)
17183 paired_init_builtins ();
17184 if (TARGET_EXTRA_BUILTINS)
17185 altivec_init_builtins ();
17186 if (TARGET_HTM)
17187 htm_init_builtins ();
17189 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17190 rs6000_common_init_builtins ();
17192 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17193 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17194 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17196 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17197 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17198 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17200 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17201 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17202 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17204 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17205 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17206 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17208 mode = (TARGET_64BIT) ? DImode : SImode;
17209 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17210 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17211 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17213 ftype = build_function_type_list (unsigned_intDI_type_node,
17214 NULL_TREE);
17215 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17217 if (TARGET_64BIT)
17218 ftype = build_function_type_list (unsigned_intDI_type_node,
17219 NULL_TREE);
17220 else
17221 ftype = build_function_type_list (unsigned_intSI_type_node,
17222 NULL_TREE);
17223 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17225 ftype = build_function_type_list (double_type_node, NULL_TREE);
17226 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17228 ftype = build_function_type_list (void_type_node,
17229 intSI_type_node, double_type_node,
17230 NULL_TREE);
17231 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17233 ftype = build_function_type_list (void_type_node, NULL_TREE);
17234 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17235 def_builtin ("__builtin_rs6000_speculation_barrier", ftype,
17236 MISC_BUILTIN_SPEC_BARRIER);
17238 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17239 NULL_TREE);
17240 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17241 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17243 /* AIX libm provides clog as __clog. */
17244 if (TARGET_XCOFF &&
17245 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17246 set_user_assembler_name (tdecl, "__clog");
17248 #ifdef SUBTARGET_INIT_BUILTINS
17249 SUBTARGET_INIT_BUILTINS;
17250 #endif
17251 }
17253 /* Returns the rs6000 builtin decl for CODE. */
17255 static tree
17256 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17257 {
17258 HOST_WIDE_INT fnmask;
17260 if (code >= RS6000_BUILTIN_COUNT)
17261 return error_mark_node;
17263 fnmask = rs6000_builtin_info[code].mask;
17264 if ((fnmask & rs6000_builtin_mask) != fnmask)
17265 {
17266 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17267 return error_mark_node;
17268 }
17270 return rs6000_builtin_decls[code];
17271 }
17273 static void
17274 paired_init_builtins (void)
17275 {
17276 const struct builtin_description *d;
17277 size_t i;
17278 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17280 tree int_ftype_int_v2sf_v2sf
17281 = build_function_type_list (integer_type_node,
17282 integer_type_node,
17283 V2SF_type_node,
17284 V2SF_type_node,
17285 NULL_TREE);
17286 tree pcfloat_type_node =
17287 build_pointer_type (build_qualified_type
17288 (float_type_node, TYPE_QUAL_CONST));
17290 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17291 long_integer_type_node,
17292 pcfloat_type_node,
17293 NULL_TREE);
17294 tree void_ftype_v2sf_long_pcfloat =
17295 build_function_type_list (void_type_node,
17296 V2SF_type_node,
17297 long_integer_type_node,
17298 pcfloat_type_node,
17299 NULL_TREE);
17302 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17303 PAIRED_BUILTIN_LX);
17306 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17307 PAIRED_BUILTIN_STX);
17309 /* Predicates. */
17310 d = bdesc_paired_preds;
17311 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17312 {
17313 tree type;
17314 HOST_WIDE_INT mask = d->mask;
17316 if ((mask & builtin_mask) != mask)
17317 {
17318 if (TARGET_DEBUG_BUILTIN)
17319 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17320 d->name);
17321 continue;
17322 }
17324 /* Cannot define builtin if the instruction is disabled. */
17325 gcc_assert (d->icode != CODE_FOR_nothing);
17327 if (TARGET_DEBUG_BUILTIN)
17328 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17329 (int)i, get_insn_name (d->icode), (int)d->icode,
17330 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17332 switch (insn_data[d->icode].operand[1].mode)
17333 {
17334 case E_V2SFmode:
17335 type = int_ftype_int_v2sf_v2sf;
17336 break;
17337 default:
17338 gcc_unreachable ();
17339 }
17341 def_builtin (d->name, type, d->code);
17345 static void
17346 altivec_init_builtins (void)
17347 {
17348 const struct builtin_description *d;
17349 size_t i;
17350 tree ftype;
17351 tree decl;
17352 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17354 tree pvoid_type_node = build_pointer_type (void_type_node);
17356 tree pcvoid_type_node
17357 = build_pointer_type (build_qualified_type (void_type_node,
17358 TYPE_QUAL_CONST));
17360 tree int_ftype_opaque
17361 = build_function_type_list (integer_type_node,
17362 opaque_V4SI_type_node, NULL_TREE);
17363 tree opaque_ftype_opaque
17364 = build_function_type_list (integer_type_node, NULL_TREE);
17365 tree opaque_ftype_opaque_int
17366 = build_function_type_list (opaque_V4SI_type_node,
17367 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17368 tree opaque_ftype_opaque_opaque_int
17369 = build_function_type_list (opaque_V4SI_type_node,
17370 opaque_V4SI_type_node, opaque_V4SI_type_node,
17371 integer_type_node, NULL_TREE);
17372 tree opaque_ftype_opaque_opaque_opaque
17373 = build_function_type_list (opaque_V4SI_type_node,
17374 opaque_V4SI_type_node, opaque_V4SI_type_node,
17375 opaque_V4SI_type_node, NULL_TREE);
17376 tree opaque_ftype_opaque_opaque
17377 = build_function_type_list (opaque_V4SI_type_node,
17378 opaque_V4SI_type_node, opaque_V4SI_type_node,
17379 NULL_TREE);
17380 tree int_ftype_int_opaque_opaque
17381 = build_function_type_list (integer_type_node,
17382 integer_type_node, opaque_V4SI_type_node,
17383 opaque_V4SI_type_node, NULL_TREE);
17384 tree int_ftype_int_v4si_v4si
17385 = build_function_type_list (integer_type_node,
17386 integer_type_node, V4SI_type_node,
17387 V4SI_type_node, NULL_TREE);
17388 tree int_ftype_int_v2di_v2di
17389 = build_function_type_list (integer_type_node,
17390 integer_type_node, V2DI_type_node,
17391 V2DI_type_node, NULL_TREE);
17392 tree void_ftype_v4si
17393 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17394 tree v8hi_ftype_void
17395 = build_function_type_list (V8HI_type_node, NULL_TREE);
17396 tree void_ftype_void
17397 = build_function_type_list (void_type_node, NULL_TREE);
17398 tree void_ftype_int
17399 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17401 tree opaque_ftype_long_pcvoid
17402 = build_function_type_list (opaque_V4SI_type_node,
17403 long_integer_type_node, pcvoid_type_node,
17404 NULL_TREE);
17405 tree v16qi_ftype_long_pcvoid
17406 = build_function_type_list (V16QI_type_node,
17407 long_integer_type_node, pcvoid_type_node,
17408 NULL_TREE);
17409 tree v8hi_ftype_long_pcvoid
17410 = build_function_type_list (V8HI_type_node,
17411 long_integer_type_node, pcvoid_type_node,
17412 NULL_TREE);
17413 tree v4si_ftype_long_pcvoid
17414 = build_function_type_list (V4SI_type_node,
17415 long_integer_type_node, pcvoid_type_node,
17416 NULL_TREE);
17417 tree v4sf_ftype_long_pcvoid
17418 = build_function_type_list (V4SF_type_node,
17419 long_integer_type_node, pcvoid_type_node,
17420 NULL_TREE);
17421 tree v2df_ftype_long_pcvoid
17422 = build_function_type_list (V2DF_type_node,
17423 long_integer_type_node, pcvoid_type_node,
17424 NULL_TREE);
17425 tree v2di_ftype_long_pcvoid
17426 = build_function_type_list (V2DI_type_node,
17427 long_integer_type_node, pcvoid_type_node,
17428 NULL_TREE);
17430 tree void_ftype_opaque_long_pvoid
17431 = build_function_type_list (void_type_node,
17432 opaque_V4SI_type_node, long_integer_type_node,
17433 pvoid_type_node, NULL_TREE);
17434 tree void_ftype_v4si_long_pvoid
17435 = build_function_type_list (void_type_node,
17436 V4SI_type_node, long_integer_type_node,
17437 pvoid_type_node, NULL_TREE);
17438 tree void_ftype_v16qi_long_pvoid
17439 = build_function_type_list (void_type_node,
17440 V16QI_type_node, long_integer_type_node,
17441 pvoid_type_node, NULL_TREE);
17443 tree void_ftype_v16qi_pvoid_long
17444 = build_function_type_list (void_type_node,
17445 V16QI_type_node, pvoid_type_node,
17446 long_integer_type_node, NULL_TREE);
17448 tree void_ftype_v8hi_long_pvoid
17449 = build_function_type_list (void_type_node,
17450 V8HI_type_node, long_integer_type_node,
17451 pvoid_type_node, NULL_TREE);
17452 tree void_ftype_v4sf_long_pvoid
17453 = build_function_type_list (void_type_node,
17454 V4SF_type_node, long_integer_type_node,
17455 pvoid_type_node, NULL_TREE);
17456 tree void_ftype_v2df_long_pvoid
17457 = build_function_type_list (void_type_node,
17458 V2DF_type_node, long_integer_type_node,
17459 pvoid_type_node, NULL_TREE);
17460 tree void_ftype_v2di_long_pvoid
17461 = build_function_type_list (void_type_node,
17462 V2DI_type_node, long_integer_type_node,
17463 pvoid_type_node, NULL_TREE);
17464 tree int_ftype_int_v8hi_v8hi
17465 = build_function_type_list (integer_type_node,
17466 integer_type_node, V8HI_type_node,
17467 V8HI_type_node, NULL_TREE);
17468 tree int_ftype_int_v16qi_v16qi
17469 = build_function_type_list (integer_type_node,
17470 integer_type_node, V16QI_type_node,
17471 V16QI_type_node, NULL_TREE);
17472 tree int_ftype_int_v4sf_v4sf
17473 = build_function_type_list (integer_type_node,
17474 integer_type_node, V4SF_type_node,
17475 V4SF_type_node, NULL_TREE);
17476 tree int_ftype_int_v2df_v2df
17477 = build_function_type_list (integer_type_node,
17478 integer_type_node, V2DF_type_node,
17479 V2DF_type_node, NULL_TREE);
17480 tree v2di_ftype_v2di
17481 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17482 tree v4si_ftype_v4si
17483 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17484 tree v8hi_ftype_v8hi
17485 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17486 tree v16qi_ftype_v16qi
17487 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17488 tree v4sf_ftype_v4sf
17489 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17490 tree v2df_ftype_v2df
17491 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17492 tree void_ftype_pcvoid_int_int
17493 = build_function_type_list (void_type_node,
17494 pcvoid_type_node, integer_type_node,
17495 integer_type_node, NULL_TREE);
17497 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17498 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17499 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17500 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17501 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17502 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17503 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17504 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17505 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17506 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17507 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17508 ALTIVEC_BUILTIN_LVXL_V2DF);
17509 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17510 ALTIVEC_BUILTIN_LVXL_V2DI);
17511 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17512 ALTIVEC_BUILTIN_LVXL_V4SF);
17513 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17514 ALTIVEC_BUILTIN_LVXL_V4SI);
17515 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17516 ALTIVEC_BUILTIN_LVXL_V8HI);
17517 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17518 ALTIVEC_BUILTIN_LVXL_V16QI);
17519 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17520 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17521 ALTIVEC_BUILTIN_LVX_V2DF);
17522 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17523 ALTIVEC_BUILTIN_LVX_V2DI);
17524 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17525 ALTIVEC_BUILTIN_LVX_V4SF);
17526 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17527 ALTIVEC_BUILTIN_LVX_V4SI);
17528 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17529 ALTIVEC_BUILTIN_LVX_V8HI);
17530 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17531 ALTIVEC_BUILTIN_LVX_V16QI);
17532 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17533 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17534 ALTIVEC_BUILTIN_STVX_V2DF);
17535 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17536 ALTIVEC_BUILTIN_STVX_V2DI);
17537 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17538 ALTIVEC_BUILTIN_STVX_V4SF);
17539 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17540 ALTIVEC_BUILTIN_STVX_V4SI);
17541 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17542 ALTIVEC_BUILTIN_STVX_V8HI);
17543 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17544 ALTIVEC_BUILTIN_STVX_V16QI);
17545 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17546 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17547 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17548 ALTIVEC_BUILTIN_STVXL_V2DF);
17549 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17550 ALTIVEC_BUILTIN_STVXL_V2DI);
17551 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17552 ALTIVEC_BUILTIN_STVXL_V4SF);
17553 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17554 ALTIVEC_BUILTIN_STVXL_V4SI);
17555 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17556 ALTIVEC_BUILTIN_STVXL_V8HI);
17557 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17558 ALTIVEC_BUILTIN_STVXL_V16QI);
17559 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17560 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17561 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17562 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17563 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17564 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17565 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17566 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17567 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17568 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17569 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17570 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17571 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17572 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17573 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17574 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17576 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17577 VSX_BUILTIN_LXVD2X_V2DF);
17578 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17579 VSX_BUILTIN_LXVD2X_V2DI);
17580 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17581 VSX_BUILTIN_LXVW4X_V4SF);
17582 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17583 VSX_BUILTIN_LXVW4X_V4SI);
17584 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17585 VSX_BUILTIN_LXVW4X_V8HI);
17586 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17587 VSX_BUILTIN_LXVW4X_V16QI);
17588 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17589 VSX_BUILTIN_STXVD2X_V2DF);
17590 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17591 VSX_BUILTIN_STXVD2X_V2DI);
17592 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17593 VSX_BUILTIN_STXVW4X_V4SF);
17594 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17595 VSX_BUILTIN_STXVW4X_V4SI);
17596 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17597 VSX_BUILTIN_STXVW4X_V8HI);
17598 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17599 VSX_BUILTIN_STXVW4X_V16QI);
17601 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17602 VSX_BUILTIN_LD_ELEMREV_V2DF);
17603 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17604 VSX_BUILTIN_LD_ELEMREV_V2DI);
17605 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17606 VSX_BUILTIN_LD_ELEMREV_V4SF);
17607 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17608 VSX_BUILTIN_LD_ELEMREV_V4SI);
17609 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17610 VSX_BUILTIN_LD_ELEMREV_V8HI);
17611 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17612 VSX_BUILTIN_LD_ELEMREV_V16QI);
17613 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17614 VSX_BUILTIN_ST_ELEMREV_V2DF);
17615 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17616 VSX_BUILTIN_ST_ELEMREV_V2DI);
17617 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17618 VSX_BUILTIN_ST_ELEMREV_V4SF);
17619 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17620 VSX_BUILTIN_ST_ELEMREV_V4SI);
17621 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17622 VSX_BUILTIN_ST_ELEMREV_V8HI);
17623 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17624 VSX_BUILTIN_ST_ELEMREV_V16QI);
17626 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17627 VSX_BUILTIN_VEC_LD);
17628 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17629 VSX_BUILTIN_VEC_ST);
17630 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17631 VSX_BUILTIN_VEC_XL);
17632 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17633 VSX_BUILTIN_VEC_XL_BE);
17634 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17635 VSX_BUILTIN_VEC_XST);
17636 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17637 VSX_BUILTIN_VEC_XST_BE);
17639 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17640 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17641 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17643 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17644 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17645 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17646 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17647 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17648 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17649 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17650 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17651 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17652 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17653 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17654 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17656 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17657 ALTIVEC_BUILTIN_VEC_ADDE);
17658 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17659 ALTIVEC_BUILTIN_VEC_ADDEC);
17660 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17661 ALTIVEC_BUILTIN_VEC_CMPNE);
17662 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17663 ALTIVEC_BUILTIN_VEC_MUL);
17664 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17665 ALTIVEC_BUILTIN_VEC_SUBE);
17666 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17667 ALTIVEC_BUILTIN_VEC_SUBEC);
17669 /* Cell builtins. */
17670 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17671 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17672 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17673 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17675 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17676 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17677 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17678 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17680 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17681 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17682 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17683 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17685 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17686 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17687 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17688 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17690 if (TARGET_P9_VECTOR)
17692 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17693 P9V_BUILTIN_STXVL);
17694 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17695 P9V_BUILTIN_XST_LEN_R);
17698 /* Add the DST variants. */
17699 d = bdesc_dst;
17700 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17702 HOST_WIDE_INT mask = d->mask;
17704 /* It is expected that these dst built-in functions may have
17705 d->icode equal to CODE_FOR_nothing. */
17706 if ((mask & builtin_mask) != mask)
17708 if (TARGET_DEBUG_BUILTIN)
17709 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17710 d->name);
17711 continue;
17713 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17716 /* Initialize the predicates. */
17717 d = bdesc_altivec_preds;
17718 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17720 machine_mode mode1;
17721 tree type;
17722 HOST_WIDE_INT mask = d->mask;
17724 if ((mask & builtin_mask) != mask)
17726 if (TARGET_DEBUG_BUILTIN)
17727 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17728 d->name);
17729 continue;
17732 if (rs6000_overloaded_builtin_p (d->code))
17733 mode1 = VOIDmode;
17734 else
17736 /* Cannot define builtin if the instruction is disabled. */
17737 gcc_assert (d->icode != CODE_FOR_nothing);
17738 mode1 = insn_data[d->icode].operand[1].mode;
17741 switch (mode1)
17743 case E_VOIDmode:
17744 type = int_ftype_int_opaque_opaque;
17745 break;
17746 case E_V2DImode:
17747 type = int_ftype_int_v2di_v2di;
17748 break;
17749 case E_V4SImode:
17750 type = int_ftype_int_v4si_v4si;
17751 break;
17752 case E_V8HImode:
17753 type = int_ftype_int_v8hi_v8hi;
17754 break;
17755 case E_V16QImode:
17756 type = int_ftype_int_v16qi_v16qi;
17757 break;
17758 case E_V4SFmode:
17759 type = int_ftype_int_v4sf_v4sf;
17760 break;
17761 case E_V2DFmode:
17762 type = int_ftype_int_v2df_v2df;
17763 break;
17764 default:
17765 gcc_unreachable ();
17768 def_builtin (d->name, type, d->code);
17771 /* Initialize the abs* operators. */
17772 d = bdesc_abs;
17773 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17775 machine_mode mode0;
17776 tree type;
17777 HOST_WIDE_INT mask = d->mask;
17779 if ((mask & builtin_mask) != mask)
17781 if (TARGET_DEBUG_BUILTIN)
17782 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17783 d->name);
17784 continue;
17787 /* Cannot define builtin if the instruction is disabled. */
17788 gcc_assert (d->icode != CODE_FOR_nothing);
17789 mode0 = insn_data[d->icode].operand[0].mode;
17791 switch (mode0)
17793 case E_V2DImode:
17794 type = v2di_ftype_v2di;
17795 break;
17796 case E_V4SImode:
17797 type = v4si_ftype_v4si;
17798 break;
17799 case E_V8HImode:
17800 type = v8hi_ftype_v8hi;
17801 break;
17802 case E_V16QImode:
17803 type = v16qi_ftype_v16qi;
17804 break;
17805 case E_V4SFmode:
17806 type = v4sf_ftype_v4sf;
17807 break;
17808 case E_V2DFmode:
17809 type = v2df_ftype_v2df;
17810 break;
17811 default:
17812 gcc_unreachable ();
17815 def_builtin (d->name, type, d->code);
17818 /* Initialize target builtin that implements
17819 targetm.vectorize.builtin_mask_for_load. */
17821 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17822 v16qi_ftype_long_pcvoid,
17823 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17824 BUILT_IN_MD, NULL, NULL_TREE);
17825 TREE_READONLY (decl) = 1;
17826 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17827 altivec_builtin_mask_for_load = decl;
17829 /* Access to the vec_init patterns. */
17830 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17831 integer_type_node, integer_type_node,
17832 integer_type_node, NULL_TREE);
17833 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17835 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17836 short_integer_type_node,
17837 short_integer_type_node,
17838 short_integer_type_node,
17839 short_integer_type_node,
17840 short_integer_type_node,
17841 short_integer_type_node,
17842 short_integer_type_node, NULL_TREE);
17843 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17845 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17846 char_type_node, char_type_node,
17847 char_type_node, char_type_node,
17848 char_type_node, char_type_node,
17849 char_type_node, char_type_node,
17850 char_type_node, char_type_node,
17851 char_type_node, char_type_node,
17852 char_type_node, char_type_node,
17853 char_type_node, NULL_TREE);
17854 def_builtin ("__builtin_vec_init_v16qi", ftype,
17855 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17857 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17858 float_type_node, float_type_node,
17859 float_type_node, NULL_TREE);
17860 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17862 /* VSX builtins. */
17863 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17864 double_type_node, NULL_TREE);
17865 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17867 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17868 intDI_type_node, NULL_TREE);
17869 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17871 /* Access to the vec_set patterns. */
17872 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17873 intSI_type_node,
17874 integer_type_node, NULL_TREE);
17875 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17877 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17878 intHI_type_node,
17879 integer_type_node, NULL_TREE);
17880 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17882 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17883 intQI_type_node,
17884 integer_type_node, NULL_TREE);
17885 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17887 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17888 float_type_node,
17889 integer_type_node, NULL_TREE);
17890 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17892 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17893 double_type_node,
17894 integer_type_node, NULL_TREE);
17895 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17897 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17898 intDI_type_node,
17899 integer_type_node, NULL_TREE);
17900 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17902 /* Access to the vec_extract patterns. */
17903 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17904 integer_type_node, NULL_TREE);
17905 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17907 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17908 integer_type_node, NULL_TREE);
17909 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17911 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17912 integer_type_node, NULL_TREE);
17913 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17915 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17916 integer_type_node, NULL_TREE);
17917 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17919 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17920 integer_type_node, NULL_TREE);
17921 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17923 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17924 integer_type_node, NULL_TREE);
17925 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17928 if (V1TI_type_node)
17930 tree v1ti_ftype_long_pcvoid
17931 = build_function_type_list (V1TI_type_node,
17932 long_integer_type_node, pcvoid_type_node,
17933 NULL_TREE);
17934 tree void_ftype_v1ti_long_pvoid
17935 = build_function_type_list (void_type_node,
17936 V1TI_type_node, long_integer_type_node,
17937 pvoid_type_node, NULL_TREE);
17938 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17939 VSX_BUILTIN_LXVD2X_V1TI);
17940 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17941 VSX_BUILTIN_STXVD2X_V1TI);
17942 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17943 NULL_TREE, NULL_TREE);
17944 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17945 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17946 intTI_type_node,
17947 integer_type_node, NULL_TREE);
17948 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17949 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17950 integer_type_node, NULL_TREE);
17951 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
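/* A rough usage sketch for the vec_init/vec_set/vec_ext builtins wired up
   above -- assuming a powerpc target compiled with -maltivec, and noting
   that user code normally reaches these through the vec_* interfaces in
   altivec.h rather than calling the builtins directly.  */
#if 0
#include <altivec.h>

vector signed int
build_and_extract (int a, int b, int c, int d, int *first)
{
  vector signed int v = __builtin_vec_init_v4si (a, b, c, d);
  v = __builtin_vec_set_v4si (v, 99, 0);	/* Replace element 0.  */
  *first = __builtin_vec_ext_v4si (v, 0);	/* Read element 0 back.  */
  return v;
}
#endif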
17956 static void
17957 htm_init_builtins (void)
17959 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17960 const struct builtin_description *d;
17961 size_t i;
17963 d = bdesc_htm;
17964 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17966 tree op[MAX_HTM_OPERANDS], type;
17967 HOST_WIDE_INT mask = d->mask;
17968 unsigned attr = rs6000_builtin_info[d->code].attr;
17969 bool void_func = (attr & RS6000_BTC_VOID);
17970 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17971 int nopnds = 0;
17972 tree gpr_type_node;
17973 tree rettype;
17974 tree argtype;
17976 /* It is expected that these htm built-in functions may have
17977 d->icode equal to CODE_FOR_nothing. */
17979 if (TARGET_32BIT && TARGET_POWERPC64)
17980 gpr_type_node = long_long_unsigned_type_node;
17981 else
17982 gpr_type_node = long_unsigned_type_node;
17984 if (attr & RS6000_BTC_SPR)
17986 rettype = gpr_type_node;
17987 argtype = gpr_type_node;
17989 else if (d->code == HTM_BUILTIN_TABORTDC
17990 || d->code == HTM_BUILTIN_TABORTDCI)
17992 rettype = unsigned_type_node;
17993 argtype = gpr_type_node;
17995 else
17997 rettype = unsigned_type_node;
17998 argtype = unsigned_type_node;
18001 if ((mask & builtin_mask) != mask)
18003 if (TARGET_DEBUG_BUILTIN)
18004 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
18005 continue;
18008 if (d->name == 0)
18010 if (TARGET_DEBUG_BUILTIN)
18011 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
18012 (long unsigned) i);
18013 continue;
18016 op[nopnds++] = (void_func) ? void_type_node : rettype;
18018 if (attr_args == RS6000_BTC_UNARY)
18019 op[nopnds++] = argtype;
18020 else if (attr_args == RS6000_BTC_BINARY)
18022 op[nopnds++] = argtype;
18023 op[nopnds++] = argtype;
18025 else if (attr_args == RS6000_BTC_TERNARY)
18027 op[nopnds++] = argtype;
18028 op[nopnds++] = argtype;
18029 op[nopnds++] = argtype;
18032 switch (nopnds)
18034 case 1:
18035 type = build_function_type_list (op[0], NULL_TREE);
18036 break;
18037 case 2:
18038 type = build_function_type_list (op[0], op[1], NULL_TREE);
18039 break;
18040 case 3:
18041 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
18042 break;
18043 case 4:
18044 type = build_function_type_list (op[0], op[1], op[2], op[3],
18045 NULL_TREE);
18046 break;
18047 default:
18048 gcc_unreachable ();
18051 def_builtin (d->name, type, d->code);
18055 /* Hash function for builtin functions with up to 3 arguments and a return
18056 type. */
18057 hashval_t
18058 builtin_hasher::hash (builtin_hash_struct *bh)
18060 unsigned ret = 0;
18061 int i;
18063 for (i = 0; i < 4; i++)
18065 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
18066 ret = (ret * 2) + bh->uns_p[i];
18069 return ret;
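/* Spelled out, the fold above computes (writing M for MAX_MACHINE_MODE,
   m[i] for bh->mode[i] and u[i] for bh->uns_p[i]):

     hash = ((((((m[0]*2 + u[0]) * M + m[1]) * 2 + u[1]) * M + m[2]) * 2
	      + u[2]) * M + m[3]) * 2 + u[3]

   i.e. the four (mode, uns_p) pairs act as digits of a mixed-radix number,
   so distinct signatures can only collide via hashval_t wraparound.  */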
18072 /* Compare builtin hash entries H1 and H2 for equivalence. */
18073 bool
18074 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
18076 return ((p1->mode[0] == p2->mode[0])
18077 && (p1->mode[1] == p2->mode[1])
18078 && (p1->mode[2] == p2->mode[2])
18079 && (p1->mode[3] == p2->mode[3])
18080 && (p1->uns_p[0] == p2->uns_p[0])
18081 && (p1->uns_p[1] == p2->uns_p[1])
18082 && (p1->uns_p[2] == p2->uns_p[2])
18083 && (p1->uns_p[3] == p2->uns_p[3]));
18086 /* Map types for builtin functions with an explicit return type and up to 3
18087 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
18088 of the unused arguments. */
18089 static tree
18090 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18091 machine_mode mode_arg1, machine_mode mode_arg2,
18092 enum rs6000_builtins builtin, const char *name)
18094 struct builtin_hash_struct h;
18095 struct builtin_hash_struct *h2;
18096 int num_args = 3;
18097 int i;
18098 tree ret_type = NULL_TREE;
18099 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18101 /* Create builtin_hash_table. */
18102 if (builtin_hash_table == NULL)
18103 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18105 h.type = NULL_TREE;
18106 h.mode[0] = mode_ret;
18107 h.mode[1] = mode_arg0;
18108 h.mode[2] = mode_arg1;
18109 h.mode[3] = mode_arg2;
18110 h.uns_p[0] = 0;
18111 h.uns_p[1] = 0;
18112 h.uns_p[2] = 0;
18113 h.uns_p[3] = 0;
18115 /* If the built-in function produces unsigned results or takes unsigned
18116 arguments, and it is returned as a decl for the vectorizer (such as
18117 widening multiplies, permute), make sure the arguments and return value
18118 are type correct. */
18119 switch (builtin)
18121 /* unsigned 1 argument functions. */
18122 case CRYPTO_BUILTIN_VSBOX:
18123 case P8V_BUILTIN_VGBBD:
18124 case MISC_BUILTIN_CDTBCD:
18125 case MISC_BUILTIN_CBCDTD:
18126 h.uns_p[0] = 1;
18127 h.uns_p[1] = 1;
18128 break;
18130 /* unsigned 2 argument functions. */
18131 case ALTIVEC_BUILTIN_VMULEUB:
18132 case ALTIVEC_BUILTIN_VMULEUH:
18133 case ALTIVEC_BUILTIN_VMULEUW:
18134 case ALTIVEC_BUILTIN_VMULOUB:
18135 case ALTIVEC_BUILTIN_VMULOUH:
18136 case ALTIVEC_BUILTIN_VMULOUW:
18137 case CRYPTO_BUILTIN_VCIPHER:
18138 case CRYPTO_BUILTIN_VCIPHERLAST:
18139 case CRYPTO_BUILTIN_VNCIPHER:
18140 case CRYPTO_BUILTIN_VNCIPHERLAST:
18141 case CRYPTO_BUILTIN_VPMSUMB:
18142 case CRYPTO_BUILTIN_VPMSUMH:
18143 case CRYPTO_BUILTIN_VPMSUMW:
18144 case CRYPTO_BUILTIN_VPMSUMD:
18145 case CRYPTO_BUILTIN_VPMSUM:
18146 case MISC_BUILTIN_ADDG6S:
18147 case MISC_BUILTIN_DIVWEU:
18148 case MISC_BUILTIN_DIVWEUO:
18149 case MISC_BUILTIN_DIVDEU:
18150 case MISC_BUILTIN_DIVDEUO:
18151 case VSX_BUILTIN_UDIV_V2DI:
18152 case ALTIVEC_BUILTIN_VMAXUB:
18153 case ALTIVEC_BUILTIN_VMINUB:
18154 case ALTIVEC_BUILTIN_VMAXUH:
18155 case ALTIVEC_BUILTIN_VMINUH:
18156 case ALTIVEC_BUILTIN_VMAXUW:
18157 case ALTIVEC_BUILTIN_VMINUW:
18158 case P8V_BUILTIN_VMAXUD:
18159 case P8V_BUILTIN_VMINUD:
18160 h.uns_p[0] = 1;
18161 h.uns_p[1] = 1;
18162 h.uns_p[2] = 1;
18163 break;
18165 /* unsigned 3 argument functions. */
18166 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18167 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18168 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18169 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18170 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18171 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18172 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18173 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18174 case VSX_BUILTIN_VPERM_16QI_UNS:
18175 case VSX_BUILTIN_VPERM_8HI_UNS:
18176 case VSX_BUILTIN_VPERM_4SI_UNS:
18177 case VSX_BUILTIN_VPERM_2DI_UNS:
18178 case VSX_BUILTIN_XXSEL_16QI_UNS:
18179 case VSX_BUILTIN_XXSEL_8HI_UNS:
18180 case VSX_BUILTIN_XXSEL_4SI_UNS:
18181 case VSX_BUILTIN_XXSEL_2DI_UNS:
18182 case CRYPTO_BUILTIN_VPERMXOR:
18183 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18184 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18185 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18186 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18187 case CRYPTO_BUILTIN_VSHASIGMAW:
18188 case CRYPTO_BUILTIN_VSHASIGMAD:
18189 case CRYPTO_BUILTIN_VSHASIGMA:
18190 h.uns_p[0] = 1;
18191 h.uns_p[1] = 1;
18192 h.uns_p[2] = 1;
18193 h.uns_p[3] = 1;
18194 break;
18196 /* signed permute functions with unsigned char mask. */
18197 case ALTIVEC_BUILTIN_VPERM_16QI:
18198 case ALTIVEC_BUILTIN_VPERM_8HI:
18199 case ALTIVEC_BUILTIN_VPERM_4SI:
18200 case ALTIVEC_BUILTIN_VPERM_4SF:
18201 case ALTIVEC_BUILTIN_VPERM_2DI:
18202 case ALTIVEC_BUILTIN_VPERM_2DF:
18203 case VSX_BUILTIN_VPERM_16QI:
18204 case VSX_BUILTIN_VPERM_8HI:
18205 case VSX_BUILTIN_VPERM_4SI:
18206 case VSX_BUILTIN_VPERM_4SF:
18207 case VSX_BUILTIN_VPERM_2DI:
18208 case VSX_BUILTIN_VPERM_2DF:
18209 h.uns_p[3] = 1;
18210 break;
18212 /* unsigned args, signed return. */
18213 case VSX_BUILTIN_XVCVUXDSP:
18214 case VSX_BUILTIN_XVCVUXDDP_UNS:
18215 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18216 h.uns_p[1] = 1;
18217 break;
18219 /* signed args, unsigned return. */
18220 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18221 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18222 case MISC_BUILTIN_UNPACK_TD:
18223 case MISC_BUILTIN_UNPACK_V1TI:
18224 h.uns_p[0] = 1;
18225 break;
18227 /* unsigned arguments, bool return (compares). */
18228 case ALTIVEC_BUILTIN_VCMPEQUB:
18229 case ALTIVEC_BUILTIN_VCMPEQUH:
18230 case ALTIVEC_BUILTIN_VCMPEQUW:
18231 case P8V_BUILTIN_VCMPEQUD:
18232 case VSX_BUILTIN_CMPGE_U16QI:
18233 case VSX_BUILTIN_CMPGE_U8HI:
18234 case VSX_BUILTIN_CMPGE_U4SI:
18235 case VSX_BUILTIN_CMPGE_U2DI:
18236 case ALTIVEC_BUILTIN_VCMPGTUB:
18237 case ALTIVEC_BUILTIN_VCMPGTUH:
18238 case ALTIVEC_BUILTIN_VCMPGTUW:
18239 case P8V_BUILTIN_VCMPGTUD:
18240 h.uns_p[1] = 1;
18241 h.uns_p[2] = 1;
18242 break;
18244 /* unsigned arguments for 128-bit pack instructions. */
18245 case MISC_BUILTIN_PACK_TD:
18246 case MISC_BUILTIN_PACK_V1TI:
18247 h.uns_p[1] = 1;
18248 h.uns_p[2] = 1;
18249 break;
18251 /* unsigned second arguments (vector shift right). */
18252 case ALTIVEC_BUILTIN_VSRB:
18253 case ALTIVEC_BUILTIN_VSRH:
18254 case ALTIVEC_BUILTIN_VSRW:
18255 case P8V_BUILTIN_VSRD:
18256 h.uns_p[2] = 1;
18257 break;
18259 default:
18260 break;
18263 /* Figure out how many args are present. */
18264 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18265 num_args--;
18267 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18268 if (!ret_type && h.uns_p[0])
18269 ret_type = builtin_mode_to_type[h.mode[0]][0];
18271 if (!ret_type)
18272 fatal_error (input_location,
18273 "internal error: builtin function %qs had an unexpected "
18274 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18276 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18277 arg_type[i] = NULL_TREE;
18279 for (i = 0; i < num_args; i++)
18281 int m = (int) h.mode[i+1];
18282 int uns_p = h.uns_p[i+1];
18284 arg_type[i] = builtin_mode_to_type[m][uns_p];
18285 if (!arg_type[i] && uns_p)
18286 arg_type[i] = builtin_mode_to_type[m][0];
18288 if (!arg_type[i])
18289 fatal_error (input_location,
18290 "internal error: builtin function %qs, argument %d "
18291 "had unexpected argument type %qs", name, i,
18292 GET_MODE_NAME (m));
18295 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18296 if (*found == NULL)
18298 h2 = ggc_alloc<builtin_hash_struct> ();
18299 *h2 = h;
18300 *found = h2;
18302 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18303 arg_type[2], NULL_TREE);
18306 return (*found)->type;
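/* A worked example, assuming the usual modes of the vmuleub pattern: for
   ALTIVEC_BUILTIN_VMULEUB the caller passes mode_ret = V8HImode and
   mode_arg0 = mode_arg1 = V16QImode (mode_arg2 = VOIDmode).  The switch
   above sets uns_p[0..2], so the type built here is effectively

     vector unsigned short (*) (vector unsigned char, vector unsigned char);

   and repeated queries with the same (mode, uns_p) signature get the
   cached type back from the hash table.  */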
18309 static void
18310 rs6000_common_init_builtins (void)
18312 const struct builtin_description *d;
18313 size_t i;
18315 tree opaque_ftype_opaque = NULL_TREE;
18316 tree opaque_ftype_opaque_opaque = NULL_TREE;
18317 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18318 tree v2si_ftype = NULL_TREE;
18319 tree v2si_ftype_qi = NULL_TREE;
18320 tree v2si_ftype_v2si_qi = NULL_TREE;
18321 tree v2si_ftype_int_qi = NULL_TREE;
18322 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18324 if (TARGET_PAIRED_FLOAT)
18326 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18327 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18330 /* Paired builtins are only available if you build a compiler with the
18331 appropriate options, so only create those builtins with the appropriate
18332 compiler option. Create Altivec and VSX builtins on machines with at
18333 least the general purpose extensions (970 and newer) to allow the use of
18334 the target attribute. */
18336 if (TARGET_EXTRA_BUILTINS)
18337 builtin_mask |= RS6000_BTM_COMMON;
18339 /* Add the ternary operators. */
18340 d = bdesc_3arg;
18341 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18343 tree type;
18344 HOST_WIDE_INT mask = d->mask;
18346 if ((mask & builtin_mask) != mask)
18348 if (TARGET_DEBUG_BUILTIN)
18349 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18350 continue;
18353 if (rs6000_overloaded_builtin_p (d->code))
18355 if (! (type = opaque_ftype_opaque_opaque_opaque))
18356 type = opaque_ftype_opaque_opaque_opaque
18357 = build_function_type_list (opaque_V4SI_type_node,
18358 opaque_V4SI_type_node,
18359 opaque_V4SI_type_node,
18360 opaque_V4SI_type_node,
18361 NULL_TREE);
18363 else
18365 enum insn_code icode = d->icode;
18366 if (d->name == 0)
18368 if (TARGET_DEBUG_BUILTIN)
18369 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18370 (long unsigned)i);
18372 continue;
18375 if (icode == CODE_FOR_nothing)
18377 if (TARGET_DEBUG_BUILTIN)
18378 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18379 d->name);
18381 continue;
18384 type = builtin_function_type (insn_data[icode].operand[0].mode,
18385 insn_data[icode].operand[1].mode,
18386 insn_data[icode].operand[2].mode,
18387 insn_data[icode].operand[3].mode,
18388 d->code, d->name);
18391 def_builtin (d->name, type, d->code);
18394 /* Add the binary operators. */
18395 d = bdesc_2arg;
18396 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18398 machine_mode mode0, mode1, mode2;
18399 tree type;
18400 HOST_WIDE_INT mask = d->mask;
18402 if ((mask & builtin_mask) != mask)
18404 if (TARGET_DEBUG_BUILTIN)
18405 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18406 continue;
18409 if (rs6000_overloaded_builtin_p (d->code))
18411 if (! (type = opaque_ftype_opaque_opaque))
18412 type = opaque_ftype_opaque_opaque
18413 = build_function_type_list (opaque_V4SI_type_node,
18414 opaque_V4SI_type_node,
18415 opaque_V4SI_type_node,
18416 NULL_TREE);
18418 else
18420 enum insn_code icode = d->icode;
18421 if (d->name == 0)
18423 if (TARGET_DEBUG_BUILTIN)
18424 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18425 (long unsigned)i);
18427 continue;
18430 if (icode == CODE_FOR_nothing)
18432 if (TARGET_DEBUG_BUILTIN)
18433 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18434 d->name);
18436 continue;
18439 mode0 = insn_data[icode].operand[0].mode;
18440 mode1 = insn_data[icode].operand[1].mode;
18441 mode2 = insn_data[icode].operand[2].mode;
18443 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18445 if (! (type = v2si_ftype_v2si_qi))
18446 type = v2si_ftype_v2si_qi
18447 = build_function_type_list (opaque_V2SI_type_node,
18448 opaque_V2SI_type_node,
18449 char_type_node,
18450 NULL_TREE);
18453 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18454 && mode2 == QImode)
18456 if (! (type = v2si_ftype_int_qi))
18457 type = v2si_ftype_int_qi
18458 = build_function_type_list (opaque_V2SI_type_node,
18459 integer_type_node,
18460 char_type_node,
18461 NULL_TREE);
18464 else
18465 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18466 d->code, d->name);
18469 def_builtin (d->name, type, d->code);
18472 /* Add the simple unary operators. */
18473 d = bdesc_1arg;
18474 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18476 machine_mode mode0, mode1;
18477 tree type;
18478 HOST_WIDE_INT mask = d->mask;
18480 if ((mask & builtin_mask) != mask)
18482 if (TARGET_DEBUG_BUILTIN)
18483 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18484 continue;
18487 if (rs6000_overloaded_builtin_p (d->code))
18489 if (! (type = opaque_ftype_opaque))
18490 type = opaque_ftype_opaque
18491 = build_function_type_list (opaque_V4SI_type_node,
18492 opaque_V4SI_type_node,
18493 NULL_TREE);
18495 else
18497 enum insn_code icode = d->icode;
18498 if (d->name == 0)
18500 if (TARGET_DEBUG_BUILTIN)
18501 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18502 (long unsigned)i);
18504 continue;
18507 if (icode == CODE_FOR_nothing)
18509 if (TARGET_DEBUG_BUILTIN)
18510 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18511 d->name);
18513 continue;
18516 mode0 = insn_data[icode].operand[0].mode;
18517 mode1 = insn_data[icode].operand[1].mode;
18519 if (mode0 == V2SImode && mode1 == QImode)
18521 if (! (type = v2si_ftype_qi))
18522 type = v2si_ftype_qi
18523 = build_function_type_list (opaque_V2SI_type_node,
18524 char_type_node,
18525 NULL_TREE);
18528 else
18529 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18530 d->code, d->name);
18533 def_builtin (d->name, type, d->code);
18536 /* Add the simple no-argument operators. */
18537 d = bdesc_0arg;
18538 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18540 machine_mode mode0;
18541 tree type;
18542 HOST_WIDE_INT mask = d->mask;
18544 if ((mask & builtin_mask) != mask)
18546 if (TARGET_DEBUG_BUILTIN)
18547 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18548 continue;
18550 if (rs6000_overloaded_builtin_p (d->code))
18552 if (!opaque_ftype_opaque)
18553 opaque_ftype_opaque
18554 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18555 type = opaque_ftype_opaque;
18557 else
18559 enum insn_code icode = d->icode;
18560 if (d->name == 0)
18562 if (TARGET_DEBUG_BUILTIN)
18563 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18564 (long unsigned) i);
18565 continue;
18567 if (icode == CODE_FOR_nothing)
18569 if (TARGET_DEBUG_BUILTIN)
18570 fprintf (stderr,
18571 "rs6000_builtin, skip no-argument %s (no code)\n",
18572 d->name);
18573 continue;
18575 mode0 = insn_data[icode].operand[0].mode;
18576 if (mode0 == V2SImode)
18578 /* Code for paired single. */
18579 if (! (type = v2si_ftype))
18581 v2si_ftype
18582 = build_function_type_list (opaque_V2SI_type_node,
18583 NULL_TREE);
18584 type = v2si_ftype;
18587 else
18588 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18589 d->code, d->name);
18591 def_builtin (d->name, type, d->code);
18595 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18596 static void
18597 init_float128_ibm (machine_mode mode)
18599 if (!TARGET_XL_COMPAT)
18601 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18602 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18603 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18604 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18606 if (!TARGET_HARD_FLOAT)
18608 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18609 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18610 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18611 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18612 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18613 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18614 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18615 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18617 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18618 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18619 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18620 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18621 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18622 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18623 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18624 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18627 else
18629 set_optab_libfunc (add_optab, mode, "_xlqadd");
18630 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18631 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18632 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18635 /* Add various conversions for IFmode to use the traditional TFmode
18636 names. */
18637 if (mode == IFmode)
18639 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18640 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18641 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18642 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18643 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18644 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18646 if (TARGET_POWERPC64)
18648 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18649 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18650 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18651 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
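/* A minimal sketch of what the registrations above buy us, assuming a
   powerpc64 Linux target where long double is IBM extended double and
   -mno-xl-compat is in effect: the addition below is lowered to a call to
   __gcc_qadd, one of the libfuncs named above, rather than inline code.  */
#if 0
long double
qadd_example (long double x, long double y)
{
  return x + y;		/* Emitted as a call to __gcc_qadd.  */
}
#endif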
18656 /* Set up IEEE 128-bit floating point routines. Use different names if the
18657 arguments can be passed in a vector register. The historical PowerPC
18658 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18659 continue to use that if we aren't using vector registers to pass IEEE
18660 128-bit floating point. */
18662 static void
18663 init_float128_ieee (machine_mode mode)
18665 if (FLOAT128_VECTOR_P (mode))
18667 set_optab_libfunc (add_optab, mode, "__addkf3");
18668 set_optab_libfunc (sub_optab, mode, "__subkf3");
18669 set_optab_libfunc (neg_optab, mode, "__negkf2");
18670 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18671 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18672 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18673 set_optab_libfunc (abs_optab, mode, "__abskf2");
18675 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18676 set_optab_libfunc (ne_optab, mode, "__nekf2");
18677 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18678 set_optab_libfunc (ge_optab, mode, "__gekf2");
18679 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18680 set_optab_libfunc (le_optab, mode, "__lekf2");
18681 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18683 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18684 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18685 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18686 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18688 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18689 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18690 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18692 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18693 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18694 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18696 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18697 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18698 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18699 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18700 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18701 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18703 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18704 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18705 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18706 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18708 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18709 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18710 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18711 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18713 if (TARGET_POWERPC64)
18715 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18716 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18717 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18718 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18722 else
18724 set_optab_libfunc (add_optab, mode, "_q_add");
18725 set_optab_libfunc (sub_optab, mode, "_q_sub");
18726 set_optab_libfunc (neg_optab, mode, "_q_neg");
18727 set_optab_libfunc (smul_optab, mode, "_q_mul");
18728 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18729 if (TARGET_PPC_GPOPT)
18730 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18732 set_optab_libfunc (eq_optab, mode, "_q_feq");
18733 set_optab_libfunc (ne_optab, mode, "_q_fne");
18734 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18735 set_optab_libfunc (ge_optab, mode, "_q_fge");
18736 set_optab_libfunc (lt_optab, mode, "_q_flt");
18737 set_optab_libfunc (le_optab, mode, "_q_fle");
18739 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18740 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18741 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18742 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18743 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18744 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18745 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18746 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18750 static void
18751 rs6000_init_libfuncs (void)
18753 /* __float128 support. */
18754 if (TARGET_FLOAT128_TYPE)
18756 init_float128_ibm (IFmode);
18757 init_float128_ieee (KFmode);
18760 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18761 if (TARGET_LONG_DOUBLE_128)
18763 if (!TARGET_IEEEQUAD)
18764 init_float128_ibm (TFmode);
18766 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18767 else
18768 init_float128_ieee (TFmode);
18772 /* Emit a potentially record-form instruction, setting DST from SRC.
18773 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18774 signed comparison of DST with zero. If DOT is 1, the generated RTL
18775 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18776 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18777 a separate COMPARE. */
18779 void
18780 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18782 if (dot == 0)
18784 emit_move_insn (dst, src);
18785 return;
18788 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18790 emit_move_insn (dst, src);
18791 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18792 return;
18795 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18796 if (dot == 1)
18798 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18799 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18801 else
18803 rtx set = gen_rtx_SET (dst, src);
18804 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
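/* For concreteness, the two record-form shapes generated above are
   (with CCREG typically being cr0):

     dot == 1:  (parallel [(set ccreg (compare:CC src (const_int 0)))
			   (clobber dst)])
     dot == 2:  (parallel [(set ccreg (compare:CC src (const_int 0)))
			   (set dst src)])
*/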
18809 /* A validation routine: say whether CODE, a condition code, and MODE
18810 match. The other alternatives either don't make sense or should
18811 never be generated. */
18813 void
18814 validate_condition_mode (enum rtx_code code, machine_mode mode)
18816 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18817 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18818 && GET_MODE_CLASS (mode) == MODE_CC);
18820 /* These don't make sense. */
18821 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18822 || mode != CCUNSmode);
18824 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18825 || mode == CCUNSmode);
18827 gcc_assert (mode == CCFPmode
18828 || (code != ORDERED && code != UNORDERED
18829 && code != UNEQ && code != LTGT
18830 && code != UNGT && code != UNLT
18831 && code != UNGE && code != UNLE));
18833 /* These should never be generated except for
18834 flag_finite_math_only. */
18835 gcc_assert (mode != CCFPmode
18836 || flag_finite_math_only
18837 || (code != LE && code != GE
18838 && code != UNEQ && code != LTGT
18839 && code != UNGT && code != UNLT));
18841 /* These are invalid; the information is not there. */
18842 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18846 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18847 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18848 not zero, store there the bit offset (counted from the right) where
18849 the single stretch of 1 bits begins; and similarly for B, the bit
18850 offset where it ends. */
18852 bool
18853 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18855 unsigned HOST_WIDE_INT val = INTVAL (mask);
18856 unsigned HOST_WIDE_INT bit;
18857 int nb, ne;
18858 int n = GET_MODE_PRECISION (mode);
18860 if (mode != DImode && mode != SImode)
18861 return false;
18863 if (INTVAL (mask) >= 0)
18865 bit = val & -val;
18866 ne = exact_log2 (bit);
18867 nb = exact_log2 (val + bit);
18869 else if (val + 1 == 0)
18871 nb = n;
18872 ne = 0;
18874 else if (val & 1)
18876 val = ~val;
18877 bit = val & -val;
18878 nb = exact_log2 (bit);
18879 ne = exact_log2 (val + bit);
18881 else
18883 bit = val & -val;
18884 ne = exact_log2 (bit);
18885 if (val + bit == 0)
18886 nb = n;
18887 else
18888 nb = 0;
18891 nb--;
18893 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18894 return false;
18896 if (b)
18897 *b = nb;
18898 if (e)
18899 *e = ne;
18901 return true;
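/* Two worked examples: the SImode mask 0x00ffff00 has its stretch of ones
   running from bit 8 through bit 23 (counted from the right), so *E gets 8
   and *B gets 23.  The DImode mask 0xffffffffffff00ff is also accepted:
   the ones begin at bit 16 and wrap around to end at bit 7, giving
   *E == 16 and *B == 7.  */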
18904 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18905 or rldicr instruction, to implement an AND with it in mode MODE. */
18907 bool
18908 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18910 int nb, ne;
18912 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18913 return false;
18915 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18916 does not wrap. */
18917 if (mode == DImode)
18918 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18920 /* For SImode, rlwinm can do everything. */
18921 if (mode == SImode)
18922 return (nb < 32 && ne < 32);
18924 return false;
18927 /* Return the instruction template for an AND with mask in mode MODE, with
18928 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18930 const char *
18931 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18933 int nb, ne;
18935 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18936 gcc_unreachable ();
18938 if (mode == DImode && ne == 0)
18940 operands[3] = GEN_INT (63 - nb);
18941 if (dot)
18942 return "rldicl. %0,%1,0,%3";
18943 return "rldicl %0,%1,0,%3";
18946 if (mode == DImode && nb == 63)
18948 operands[3] = GEN_INT (63 - ne);
18949 if (dot)
18950 return "rldicr. %0,%1,0,%3";
18951 return "rldicr %0,%1,0,%3";
18954 if (nb < 32 && ne < 32)
18956 operands[3] = GEN_INT (31 - nb);
18957 operands[4] = GEN_INT (31 - ne);
18958 if (dot)
18959 return "rlwinm. %0,%1,0,%3,%4";
18960 return "rlwinm %0,%1,0,%3,%4";
18963 gcc_unreachable ();
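/* Examples of the templates chosen above: the DImode mask 0xff has
   NE == 0 and NB == 7, giving "rldicl %0,%1,0,56" (i.e. clrldi); the
   SImode mask 0x00ffff00 has NB == 23 and NE == 8, giving
   "rlwinm %0,%1,0,8,23".  */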
18966 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18967 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18968 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18970 bool
18971 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18973 int nb, ne;
18975 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18976 return false;
18978 int n = GET_MODE_PRECISION (mode);
18979 int sh = -1;
18981 if (CONST_INT_P (XEXP (shift, 1)))
18983 sh = INTVAL (XEXP (shift, 1));
18984 if (sh < 0 || sh >= n)
18985 return false;
18988 rtx_code code = GET_CODE (shift);
18990 /* Convert any shift by 0 to a rotate, to simplify below code. */
18991 if (sh == 0)
18992 code = ROTATE;
18994 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18995 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18996 code = ASHIFT;
18997 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18999 code = LSHIFTRT;
19000 sh = n - sh;
19003 /* DImode rotates need rld*. */
19004 if (mode == DImode && code == ROTATE)
19005 return (nb == 63 || ne == 0 || ne == sh);
19007 /* SImode rotates need rlw*. */
19008 if (mode == SImode && code == ROTATE)
19009 return (nb < 32 && ne < 32 && sh < 32);
19011 /* Wrap-around masks are only okay for rotates. */
19012 if (ne > nb)
19013 return false;
19015 /* Variable shifts are only okay for rotates. */
19016 if (sh < 0)
19017 return false;
19019 /* Don't allow ASHIFT if the mask is wrong for that. */
19020 if (code == ASHIFT && ne < sh)
19021 return false;
19023 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
19024 if the mask is wrong for that. */
19025 if (nb < 32 && ne < 32 && sh < 32
19026 && !(code == LSHIFTRT && nb >= 32 - sh))
19027 return true;
19029 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
19030 if the mask is wrong for that. */
19031 if (code == LSHIFTRT)
19032 sh = 64 - sh;
19033 if (nb == 63 || ne == 0 || ne == sh)
19034 return !(code == LSHIFTRT && nb >= sh);
19036 return false;
19039 /* Return the instruction template for a shift with mask in mode MODE, with
19040 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19042 const char *
19043 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
19045 int nb, ne;
19047 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19048 gcc_unreachable ();
19050 if (mode == DImode && ne == 0)
19052 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19053 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
19054 operands[3] = GEN_INT (63 - nb);
19055 if (dot)
19056 return "rld%I2cl. %0,%1,%2,%3";
19057 return "rld%I2cl %0,%1,%2,%3";
19060 if (mode == DImode && nb == 63)
19062 operands[3] = GEN_INT (63 - ne);
19063 if (dot)
19064 return "rld%I2cr. %0,%1,%2,%3";
19065 return "rld%I2cr %0,%1,%2,%3";
19068 if (mode == DImode
19069 && GET_CODE (operands[4]) != LSHIFTRT
19070 && CONST_INT_P (operands[2])
19071 && ne == INTVAL (operands[2]))
19073 operands[3] = GEN_INT (63 - nb);
19074 if (dot)
19075 return "rld%I2c. %0,%1,%2,%3";
19076 return "rld%I2c %0,%1,%2,%3";
19079 if (nb < 32 && ne < 32)
19081 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19082 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19083 operands[3] = GEN_INT (31 - nb);
19084 operands[4] = GEN_INT (31 - ne);
19085 /* This insn can also be a 64-bit rotate with mask that really makes
19086 it just a shift right (with mask); the %h below are to adjust for
19087 that situation (shift count is >= 32 in that case). */
19088 if (dot)
19089 return "rlw%I2nm. %0,%1,%h2,%3,%4";
19090 return "rlw%I2nm %0,%1,%h2,%3,%4";
19093 gcc_unreachable ();
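/* Example: a DImode left shift by 8 combined with the mask
   0xffffffffffffff00 (NB == 63, NE == 8) takes the second branch above
   and yields "rldicr %0,%1,8,55", which is the canonical encoding of
   "sldi %0,%1,8".  */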
19096 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
19097 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
19098 ASHIFT, or LSHIFTRT) in mode MODE. */
19100 bool
19101 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
19103 int nb, ne;
19105 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
19106 return false;
19108 int n = GET_MODE_PRECISION (mode);
19110 int sh = INTVAL (XEXP (shift, 1));
19111 if (sh < 0 || sh >= n)
19112 return false;
19114 rtx_code code = GET_CODE (shift);
19116 /* Convert any shift by 0 to a rotate, to simplify below code. */
19117 if (sh == 0)
19118 code = ROTATE;
19120 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19121 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
19122 code = ASHIFT;
19123 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
19125 code = LSHIFTRT;
19126 sh = n - sh;
19129 /* DImode rotates need rldimi. */
19130 if (mode == DImode && code == ROTATE)
19131 return (ne == sh);
19133 /* SImode rotates need rlwimi. */
19134 if (mode == SImode && code == ROTATE)
19135 return (nb < 32 && ne < 32 && sh < 32);
19137 /* Wrap-around masks are only okay for rotates. */
19138 if (ne > nb)
19139 return false;
19141 /* Don't allow ASHIFT if the mask is wrong for that. */
19142 if (code == ASHIFT && ne < sh)
19143 return false;
19145 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
19146 if the mask is wrong for that. */
19147 if (nb < 32 && ne < 32 && sh < 32
19148 && !(code == LSHIFTRT && nb >= 32 - sh))
19149 return true;
19151 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19152 if the mask is wrong for that. */
19153 if (code == LSHIFTRT)
19154 sh = 64 - sh;
19155 if (ne == sh)
19156 return !(code == LSHIFTRT && nb >= sh);
19158 return false;
19161 /* Return the instruction template for an insert with mask in mode MODE, with
19162 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19164 const char *
19165 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
19167 int nb, ne;
19169 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19170 gcc_unreachable ();
19172 /* Prefer rldimi because rlwimi is cracked. */
19173 if (TARGET_POWERPC64
19174 && (!dot || mode == DImode)
19175 && GET_CODE (operands[4]) != LSHIFTRT
19176 && ne == INTVAL (operands[2]))
19178 operands[3] = GEN_INT (63 - nb);
19179 if (dot)
19180 return "rldimi. %0,%1,%2,%3";
19181 return "rldimi %0,%1,%2,%3";
19184 if (nb < 32 && ne < 32)
19186 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19187 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19188 operands[3] = GEN_INT (31 - nb);
19189 operands[4] = GEN_INT (31 - ne);
19190 if (dot)
19191 return "rlwimi. %0,%1,%2,%3,%4";
19192 return "rlwimi %0,%1,%2,%3,%4";
19195 gcc_unreachable ();
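/* Example: a rotate by 8 with SImode mask 0x0000ff00 (NB == 15, NE == 8)
   gives "rldimi %0,%1,8,48" on a 64-bit target (the preferred, uncracked
   form) and "rlwimi %0,%1,8,16,23" otherwise.  */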
19198 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19199 using two machine instructions. */
19201 bool
19202 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19204 /* There are two kinds of AND we can handle with two insns:
19205 1) those we can do with two rl* insns;
19206 2) ori[s];xori[s].
19208 We do not handle that last case yet. */
19210 /* If there is just one stretch of ones, we can do it. */
19211 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19212 return true;
19214 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19215 one insn, we can do the whole thing with two. */
19216 unsigned HOST_WIDE_INT val = INTVAL (c);
19217 unsigned HOST_WIDE_INT bit1 = val & -val;
19218 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19219 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19220 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19221 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
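/* A worked example of the hole-filling trick, for val == 0x00ff00ff:
   bit1 == 0x1 (lowest set bit), bit2 == 0x100 (bottom of the lowest
   hole), bit3 == 0x10000 (first set bit above that hole).  The candidate
   val + bit3 - bit2 == 0x00ffffff is a single stretch of ones, so this
   AND can be done in two insns.  */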
19224 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19225 If EXPAND is true, split rotate-and-mask instructions we generate to
19226 their constituent parts as well (this is used during expand); if DOT
19227 is 1, make the last insn a record-form instruction clobbering the
19228 destination GPR and setting the CC reg (from operands[3]); if 2, set
19229 that GPR as well as the CC reg. */
19231 void
19232 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19234 gcc_assert (!(expand && dot));
19236 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19238 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19239 shift right. This generates better code than doing the masks without
19240 shifts, or shifting first right and then left. */
19241 int nb, ne;
19242 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19244 gcc_assert (mode == DImode);
19246 int shift = 63 - nb;
19247 if (expand)
19249 rtx tmp1 = gen_reg_rtx (DImode);
19250 rtx tmp2 = gen_reg_rtx (DImode);
19251 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19252 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19253 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19255 else
19257 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19258 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19259 emit_move_insn (operands[0], tmp);
19260 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19261 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19263 return;
19266 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19267 that does the rest. */
19268 unsigned HOST_WIDE_INT bit1 = val & -val;
19269 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19270 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19271 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19273 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19274 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19276 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19278 /* Two "no-rotate"-and-mask instructions, for SImode. */
19279 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19281 gcc_assert (mode == SImode);
19283 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19284 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19285 emit_move_insn (reg, tmp);
19286 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19287 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19288 return;
19291 gcc_assert (mode == DImode);
19293 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19294 insns; we have to do the first in SImode, because it wraps. */
19295 if (mask2 <= 0xffffffff
19296 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19298 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19299 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19300 GEN_INT (mask1));
19301 rtx reg_low = gen_lowpart (SImode, reg);
19302 emit_move_insn (reg_low, tmp);
19303 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19304 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19305 return;
19308 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19309 at the top end), rotate back and clear the other hole. */
19310 int right = exact_log2 (bit3);
19311 int left = 64 - right;
19313 /* Rotate the mask too. */
19314 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19316 if (expand)
19318 rtx tmp1 = gen_reg_rtx (DImode);
19319 rtx tmp2 = gen_reg_rtx (DImode);
19320 rtx tmp3 = gen_reg_rtx (DImode);
19321 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19322 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19323 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19324 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19326 else
19328 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19329 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19330 emit_move_insn (operands[0], tmp);
19331 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19332 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19333 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
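/* Continuing the 0x00ff00ff example from rs6000_is_valid_2insn_and:
   mask1 == 0xffff00ff (a wrap-around mask rlwinm can do) and
   mask2 == 0x00ffffff, so in SImode the two ANDs emitted above match
   rlwinm patterns, roughly:

     rlwinm %0,%1,0,24,15
     rlwinm %0,%0,0,8,31
*/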
19337 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19338 for lfq and stfq insns iff the registers are hard registers. */
19340 int
19341 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19343 /* We might have been passed a SUBREG. */
19344 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19345 return 0;
19347 /* We might have been passed non-floating-point registers. */
19348 if (!FP_REGNO_P (REGNO (reg1))
19349 || !FP_REGNO_P (REGNO (reg2)))
19350 return 0;
19352 return (REGNO (reg1) == REGNO (reg2) - 1);
19355 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19356 addr1 and addr2 must be in consecutive memory locations
19357 (addr2 == addr1 + 8). */
19359 int
19360 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19362 rtx addr1, addr2;
19363 unsigned int reg1, reg2;
19364 int offset1, offset2;
19366 /* The mems cannot be volatile. */
19367 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19368 return 0;
19370 addr1 = XEXP (mem1, 0);
19371 addr2 = XEXP (mem2, 0);
19373 /* Extract an offset (if used) from the first addr. */
19374 if (GET_CODE (addr1) == PLUS)
19376 /* If not a REG, return zero. */
19377 if (GET_CODE (XEXP (addr1, 0)) != REG)
19378 return 0;
19379 else
19381 reg1 = REGNO (XEXP (addr1, 0));
19382 /* The offset must be constant! */
19383 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19384 return 0;
19385 offset1 = INTVAL (XEXP (addr1, 1));
19388 else if (GET_CODE (addr1) != REG)
19389 return 0;
19390 else
19392 reg1 = REGNO (addr1);
19393 /* This was a simple (mem (reg)) expression. Offset is 0. */
19394 offset1 = 0;
19397 /* And now for the second addr. */
19398 if (GET_CODE (addr2) == PLUS)
19400 /* If not a REG, return zero. */
19401 if (GET_CODE (XEXP (addr2, 0)) != REG)
19402 return 0;
19403 else
19405 reg2 = REGNO (XEXP (addr2, 0));
19406 /* The offset must be constant. */
19407 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19408 return 0;
19409 offset2 = INTVAL (XEXP (addr2, 1));
19412 else if (GET_CODE (addr2) != REG)
19413 return 0;
19414 else
19416 reg2 = REGNO (addr2);
19417 /* This was a simple (mem (reg)) expression. Offset is 0. */
19418 offset2 = 0;
19421 /* Both of these must have the same base register. */
19422 if (reg1 != reg2)
19423 return 0;
19425 /* The offset for the second addr must be 8 more than the first addr. */
19426 if (offset2 != offset1 + 8)
19427 return 0;
19429 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19430 instructions. */
19431 return 1;
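/* For example, the addresses (plus (reg 3) (const_int 8)) and
   (plus (reg 3) (const_int 16)) satisfy every check above: same base
   register, constant offsets, and the second offset exactly 8 more than
   the first.  Changing either the base register or the stride fails.  */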
19434 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19435 need to use DDmode, in all other cases we can use the same mode. */
19436 static machine_mode
19437 rs6000_secondary_memory_needed_mode (machine_mode mode)
19439 if (lra_in_progress && mode == SDmode)
19440 return DDmode;
19441 return mode;
19444 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19445 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19446 only work on the traditional altivec registers, note if an altivec register
19447 was chosen. */
19449 static enum rs6000_reg_type
19450 register_to_reg_type (rtx reg, bool *is_altivec)
19452 HOST_WIDE_INT regno;
19453 enum reg_class rclass;
19455 if (GET_CODE (reg) == SUBREG)
19456 reg = SUBREG_REG (reg);
19458 if (!REG_P (reg))
19459 return NO_REG_TYPE;
19461 regno = REGNO (reg);
19462 if (regno >= FIRST_PSEUDO_REGISTER)
19464 if (!lra_in_progress && !reload_completed)
19465 return PSEUDO_REG_TYPE;
19467 regno = true_regnum (reg);
19468 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19469 return PSEUDO_REG_TYPE;
19472 gcc_assert (regno >= 0);
19474 if (is_altivec && ALTIVEC_REGNO_P (regno))
19475 *is_altivec = true;
19477 rclass = rs6000_regno_regclass[regno];
19478 return reg_class_to_reg_type[(int)rclass];
19481 /* Helper function to return the cost of adding a TOC entry address. */
19483 static inline int
19484 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19486 int ret;
19488 if (TARGET_CMODEL != CMODEL_SMALL)
19489 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19491 else
19492 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19494 return ret;
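/* Added note: these return values are rough instruction-count estimates
   rather than exact costs; they only bias reload's choice between
   alternatives.  */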
19497 /* Helper function for rs6000_secondary_reload to determine whether the memory
19498 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19499 needs reloading. Return negative if the memory is not handled by the memory
19500    helper functions (so the caller should try a different reload method),
19501    0 if no additional instructions are needed, and positive to give the
19502    extra cost for the memory.  */
19504 static int
19505 rs6000_secondary_reload_memory (rtx addr,
19506 enum reg_class rclass,
19507 machine_mode mode)
19509 int extra_cost = 0;
19510 rtx reg, and_arg, plus_arg0, plus_arg1;
19511 addr_mask_type addr_mask;
19512 const char *type = NULL;
19513 const char *fail_msg = NULL;
19515 if (GPR_REG_CLASS_P (rclass))
19516 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19518 else if (rclass == FLOAT_REGS)
19519 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19521 else if (rclass == ALTIVEC_REGS)
19522 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19524 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19525 else if (rclass == VSX_REGS)
19526 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19527 & ~RELOAD_REG_AND_M16);
19529 /* If the register allocator hasn't made up its mind yet on the register
19530      class to use, settle on defaults.  */
19531 else if (rclass == NO_REGS)
19533 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19534 & ~RELOAD_REG_AND_M16);
19536 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19537 addr_mask &= ~(RELOAD_REG_INDEXED
19538 | RELOAD_REG_PRE_INCDEC
19539 | RELOAD_REG_PRE_MODIFY);
19542 else
19543 addr_mask = 0;
19545 /* If the register isn't valid in this register class, just return now. */
19546 if ((addr_mask & RELOAD_REG_VALID) == 0)
19548 if (TARGET_DEBUG_ADDR)
19550 fprintf (stderr,
19551 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19552 "not valid in class\n",
19553 GET_MODE_NAME (mode), reg_class_names[rclass]);
19554 debug_rtx (addr);
19557 return -1;
19560 switch (GET_CODE (addr))
19562       /* Does the register class support auto update forms for this mode?  We
19563 don't need a scratch register, since the powerpc only supports
19564 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19565 case PRE_INC:
19566 case PRE_DEC:
19567 reg = XEXP (addr, 0);
19568       if (!base_reg_operand (reg, GET_MODE (reg)))
19570 fail_msg = "no base register #1";
19571 extra_cost = -1;
19574 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19576 extra_cost = 1;
19577 type = "update";
19579 break;
19581 case PRE_MODIFY:
19582 reg = XEXP (addr, 0);
19583 plus_arg1 = XEXP (addr, 1);
19584 if (!base_reg_operand (reg, GET_MODE (reg))
19585 || GET_CODE (plus_arg1) != PLUS
19586 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19588 fail_msg = "bad PRE_MODIFY";
19589 extra_cost = -1;
19592 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19594 extra_cost = 1;
19595 type = "update";
19597 break;
19599 /* Do we need to simulate AND -16 to clear the bottom address bits used
19600 in VMX load/stores? Only allow the AND for vector sizes. */
19601 case AND:
19602 and_arg = XEXP (addr, 0);
19603 if (GET_MODE_SIZE (mode) != 16
19604 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19605 || INTVAL (XEXP (addr, 1)) != -16)
19607 fail_msg = "bad Altivec AND #1";
19608 extra_cost = -1;
19611 if (rclass != ALTIVEC_REGS)
19613 if (legitimate_indirect_address_p (and_arg, false))
19614 extra_cost = 1;
19616 else if (legitimate_indexed_address_p (and_arg, false))
19617 extra_cost = 2;
19619 else
19621 fail_msg = "bad Altivec AND #2";
19622 extra_cost = -1;
19625 type = "and";
19627 break;
19629 /* If this is an indirect address, make sure it is a base register. */
19630 case REG:
19631 case SUBREG:
19632 if (!legitimate_indirect_address_p (addr, false))
19634 extra_cost = 1;
19635 type = "move";
19637 break;
19639 /* If this is an indexed address, make sure the register class can handle
19640 indexed addresses for this mode. */
19641 case PLUS:
19642 plus_arg0 = XEXP (addr, 0);
19643 plus_arg1 = XEXP (addr, 1);
19645 /* (plus (plus (reg) (constant)) (constant)) is generated during
19646 push_reload processing, so handle it now. */
19647 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19649 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19651 extra_cost = 1;
19652 type = "offset";
19656 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19657 push_reload processing, so handle it now. */
19658 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19660 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19662 extra_cost = 1;
19663 type = "indexed #2";
19667 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19669 fail_msg = "no base register #2";
19670 extra_cost = -1;
19673 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19675 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19676 || !legitimate_indexed_address_p (addr, false))
19678 extra_cost = 1;
19679 type = "indexed";
19683 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19684 && CONST_INT_P (plus_arg1))
19686 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19688 extra_cost = 1;
19689 type = "vector d-form offset";
19693 /* Make sure the register class can handle offset addresses. */
19694 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19696 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19698 extra_cost = 1;
19699 type = "offset #2";
19703 else
19705 fail_msg = "bad PLUS";
19706 extra_cost = -1;
19709 break;
19711 case LO_SUM:
19712 /* Quad offsets are restricted and can't handle normal addresses. */
19713 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19715 extra_cost = -1;
19716 type = "vector d-form lo_sum";
19719 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19721 fail_msg = "bad LO_SUM";
19722 extra_cost = -1;
19725 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19727 extra_cost = 1;
19728 type = "lo_sum";
19730 break;
19732 /* Static addresses need to create a TOC entry. */
19733 case CONST:
19734 case SYMBOL_REF:
19735 case LABEL_REF:
19736 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19738 extra_cost = -1;
19739 type = "vector d-form lo_sum #2";
19742 else
19744 type = "address";
19745 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19747 break;
19749 /* TOC references look like offsetable memory. */
19750 case UNSPEC:
19751 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19753 fail_msg = "bad UNSPEC";
19754 extra_cost = -1;
19757 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19759 extra_cost = -1;
19760 type = "vector d-form lo_sum #3";
19763 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19765 extra_cost = 1;
19766 type = "toc reference";
19768 break;
19770 default:
19772 fail_msg = "bad address";
19773 extra_cost = -1;
19777 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19779 if (extra_cost < 0)
19780 fprintf (stderr,
19781 "rs6000_secondary_reload_memory error: mode = %s, "
19782 "class = %s, addr_mask = '%s', %s\n",
19783 GET_MODE_NAME (mode),
19784 reg_class_names[rclass],
19785 rs6000_debug_addr_mask (addr_mask, false),
19786 (fail_msg != NULL) ? fail_msg : "<bad address>");
19788 else
19789 fprintf (stderr,
19790 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19791 "addr_mask = '%s', extra cost = %d, %s\n",
19792 GET_MODE_NAME (mode),
19793 reg_class_names[rclass],
19794 rs6000_debug_addr_mask (addr_mask, false),
19795 extra_cost,
19796 (type) ? type : "<none>");
19798 debug_rtx (addr);
19801 return extra_cost;
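/* Usage sketch (added, illustrative): for an Altivec-style address such as
   (and (reg 9) (const_int -16)) with RCLASS == VSX_REGS, the AND case above
   charges extra_cost = 1 (indirect base) or 2 (indexed base).  A return of
   -1 tells the caller to try another reload strategy; 0 means the address
   needs no extra instructions.  */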
19804 /* Helper function for rs6000_secondary_reload to return true if a move to a
19805    different register class is really a simple move.  */
19807 static bool
19808 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19809 enum rs6000_reg_type from_type,
19810 machine_mode mode)
19812 int size = GET_MODE_SIZE (mode);
19814 /* Add support for various direct moves available. In this function, we only
19815 look at cases where we don't need any extra registers, and one or more
19816      simple move insns are issued.  Historically, small integers were not
19817      allowed in FPR/VSX registers.  A single precision float move is not
19818      simple because we must convert to the single precision memory layout.
19819 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19820 need special direct move handling, which we do not support yet. */
19821 if (TARGET_DIRECT_MOVE
19822 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19823 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19825 if (TARGET_POWERPC64)
19827 	  /* ISA 2.07: MTVSRD or MFVSRD.  */
19828 if (size == 8)
19829 return true;
19831 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19832 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19833 return true;
19836 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19837 if (TARGET_P8_VECTOR)
19839 if (mode == SImode)
19840 return true;
19842 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19843 return true;
19846 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19847 if (mode == SDmode)
19848 return true;
19851 /* Power6+: MFTGPR or MFFGPR. */
19852 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19853 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19854 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19855 return true;
19857 /* Move to/from SPR. */
19858 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19859 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19860 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19861 return true;
19863 return false;
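/* Example (added comment): on a 64-bit ISA 2.07 (power8) target, a DImode
   move between a GPR and a VSX register is "simple" -- size == 8 hits the
   single MTVSRD/MFVSRD case above -- while a 16-byte move only qualifies
   with ISA 3.0 (TARGET_P9_VECTOR) and a non-TDmode mode.  */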
19866 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19867    special direct moves that involve allocating an extra register.  Return
19868    true if there is a helper insn for the move (recording its insn code and
19869    extra cost in SRI), or false if not.  */
19871 static bool
19872 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19873 enum rs6000_reg_type from_type,
19874 machine_mode mode,
19875 secondary_reload_info *sri,
19876 bool altivec_p)
19878 bool ret = false;
19879 enum insn_code icode = CODE_FOR_nothing;
19880 int cost = 0;
19881 int size = GET_MODE_SIZE (mode);
19883 if (TARGET_POWERPC64 && size == 16)
19885       /* Handle moving 128-bit values from GPRs to VSX registers on
19886 ISA 2.07 (power8, power9) when running in 64-bit mode using
19887 XXPERMDI to glue the two 64-bit values back together. */
19888 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19890 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19891 icode = reg_addr[mode].reload_vsx_gpr;
19894       /* Handle moving 128-bit values from VSX registers to GPRs on
19895 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19896 bottom 64-bit value. */
19897 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19899 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19900 icode = reg_addr[mode].reload_gpr_vsx;
19904 else if (TARGET_POWERPC64 && mode == SFmode)
19906 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19908 cost = 3; /* xscvdpspn, mfvsrd, and. */
19909 icode = reg_addr[mode].reload_gpr_vsx;
19912 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19914 	  cost = 2;			/* mtvsrwz, xscvspdpn.  */
19915 icode = reg_addr[mode].reload_vsx_gpr;
19919 else if (!TARGET_POWERPC64 && size == 8)
19921 /* Handle moving 64-bit values from GPRs to floating point registers on
19922 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19923 32-bit values back together. Altivec register classes must be handled
19924 specially since a different instruction is used, and the secondary
19925 reload support requires a single instruction class in the scratch
19926 register constraint. However, right now TFmode is not allowed in
19927 Altivec registers, so the pattern will never match. */
19928 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19930 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19931 icode = reg_addr[mode].reload_fpr_gpr;
19935 if (icode != CODE_FOR_nothing)
19937 ret = true;
19938 if (sri)
19940 sri->icode = icode;
19941 sri->extra_cost = cost;
19945 return ret;
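/* Sketch of the 128-bit GPR -> VSX case (added, illustrative): the helper
   named by reg_addr[mode].reload_vsx_gpr emits two MTVSRD's plus one
   XXPERMDI to glue the halves together, hence cost = 3; reload allocates
   the extra register the sequence needs.  */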
19948 /* Return whether a move between two register classes can be done either
19949 directly (simple move) or via a pattern that uses a single extra temporary
19950    (using ISA 2.07's direct move in this case).  */
19952 static bool
19953 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19954 enum rs6000_reg_type from_type,
19955 machine_mode mode,
19956 secondary_reload_info *sri,
19957 bool altivec_p)
19959 /* Fall back to load/store reloads if either type is not a register. */
19960 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19961 return false;
19963 /* If we haven't allocated registers yet, assume the move can be done for the
19964 standard register types. */
19965 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19966 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19967 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19968 return true;
19970   /* A move within the same set of registers is a simple move for
19971      non-specialized registers.  */
19972 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19973 return true;
19975 /* Check whether a simple move can be done directly. */
19976 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19978 if (sri)
19980 sri->icode = CODE_FOR_nothing;
19981 sri->extra_cost = 0;
19983 return true;
19986 /* Now check if we can do it in a few steps. */
19987 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19988 altivec_p);
19991 /* Inform reload about cases where moving X with a mode MODE to a register in
19992 RCLASS requires an extra scratch or immediate register. Return the class
19993 needed for the immediate register.
19995 For VSX and Altivec, we may need a register to convert sp+offset into
19996 reg+sp.
19998 For misaligned 64-bit gpr loads and stores we need a register to
19999 convert an offset address to indirect. */
20001 static reg_class_t
20002 rs6000_secondary_reload (bool in_p,
20003 rtx x,
20004 reg_class_t rclass_i,
20005 machine_mode mode,
20006 secondary_reload_info *sri)
20008 enum reg_class rclass = (enum reg_class) rclass_i;
20009 reg_class_t ret = ALL_REGS;
20010 enum insn_code icode;
20011 bool default_p = false;
20012 bool done_p = false;
20014 /* Allow subreg of memory before/during reload. */
20015 bool memory_p = (MEM_P (x)
20016 || (!reload_completed && GET_CODE (x) == SUBREG
20017 && MEM_P (SUBREG_REG (x))));
20019 sri->icode = CODE_FOR_nothing;
20020 sri->t_icode = CODE_FOR_nothing;
20021 sri->extra_cost = 0;
20022 icode = ((in_p)
20023 ? reg_addr[mode].reload_load
20024 : reg_addr[mode].reload_store);
20026 if (REG_P (x) || register_operand (x, mode))
20028 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
20029 bool altivec_p = (rclass == ALTIVEC_REGS);
20030 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
20032 if (!in_p)
20033 std::swap (to_type, from_type);
20035 /* Can we do a direct move of some sort? */
20036 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
20037 altivec_p))
20039 icode = (enum insn_code)sri->icode;
20040 default_p = false;
20041 done_p = true;
20042 ret = NO_REGS;
20046 /* Make sure 0.0 is not reloaded or forced into memory. */
20047 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
20049 ret = NO_REGS;
20050 default_p = false;
20051 done_p = true;
20054 /* If this is a scalar floating point value and we want to load it into the
20055 traditional Altivec registers, do it via a move via a traditional floating
20056 point register, unless we have D-form addressing. Also make sure that
20057 non-zero constants use a FPR. */
20058 if (!done_p && reg_addr[mode].scalar_in_vmx_p
20059 && !mode_supports_vmx_dform (mode)
20060 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20061 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
20063 ret = FLOAT_REGS;
20064 default_p = false;
20065 done_p = true;
20068 /* Handle reload of load/stores if we have reload helper functions. */
20069 if (!done_p && icode != CODE_FOR_nothing && memory_p)
20071 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
20072 mode);
20074 if (extra_cost >= 0)
20076 done_p = true;
20077 ret = NO_REGS;
20078 if (extra_cost > 0)
20080 sri->extra_cost = extra_cost;
20081 sri->icode = icode;
20086 /* Handle unaligned loads and stores of integer registers. */
20087 if (!done_p && TARGET_POWERPC64
20088 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20089 && memory_p
20090 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
20092 rtx addr = XEXP (x, 0);
20093 rtx off = address_offset (addr);
20095 if (off != NULL_RTX)
20097 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20098 unsigned HOST_WIDE_INT offset = INTVAL (off);
20100 /* We need a secondary reload when our legitimate_address_p
20101 says the address is good (as otherwise the entire address
20102 will be reloaded), and the offset is not a multiple of
20103 four or we have an address wrap. Address wrap will only
20104 occur for LO_SUMs since legitimate_offset_address_p
20105 rejects addresses for 16-byte mems that will wrap. */
20106 if (GET_CODE (addr) == LO_SUM
20107 ? (1 /* legitimate_address_p allows any offset for lo_sum */
20108 && ((offset & 3) != 0
20109 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
20110 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
20111 && (offset & 3) != 0))
20113 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
20114 if (in_p)
20115 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
20116 : CODE_FOR_reload_di_load);
20117 else
20118 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
20119 : CODE_FOR_reload_di_store);
20120 sri->extra_cost = 2;
20121 ret = NO_REGS;
20122 done_p = true;
20124 else
20125 default_p = true;
20127 else
20128 default_p = true;
20131 if (!done_p && !TARGET_POWERPC64
20132 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20133 && memory_p
20134 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
20136 rtx addr = XEXP (x, 0);
20137 rtx off = address_offset (addr);
20139 if (off != NULL_RTX)
20141 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20142 unsigned HOST_WIDE_INT offset = INTVAL (off);
20144 /* We need a secondary reload when our legitimate_address_p
20145 says the address is good (as otherwise the entire address
20146 will be reloaded), and we have a wrap.
20148 legitimate_lo_sum_address_p allows LO_SUM addresses to
20149 have any offset so test for wrap in the low 16 bits.
20151 legitimate_offset_address_p checks for the range
20152 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
20153 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
20154 [0x7ff4,0x7fff] respectively, so test for the
20155 intersection of these ranges, [0x7ffc,0x7fff] and
20156 [0x7ff4,0x7ff7] respectively.
20158 Note that the address we see here may have been
20159 manipulated by legitimize_reload_address. */
20160 if (GET_CODE (addr) == LO_SUM
20161 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
20162 : offset - (0x8000 - extra) < UNITS_PER_WORD)
20164 if (in_p)
20165 sri->icode = CODE_FOR_reload_si_load;
20166 else
20167 sri->icode = CODE_FOR_reload_si_store;
20168 sri->extra_cost = 2;
20169 ret = NO_REGS;
20170 done_p = true;
20172 else
20173 default_p = true;
20175 else
20176 default_p = true;
20179 if (!done_p)
20180 default_p = true;
20182 if (default_p)
20183 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20185 gcc_assert (ret != ALL_REGS);
20187 if (TARGET_DEBUG_ADDR)
20189 fprintf (stderr,
20190 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20191 "mode = %s",
20192 reg_class_names[ret],
20193 in_p ? "true" : "false",
20194 reg_class_names[rclass],
20195 GET_MODE_NAME (mode));
20197 if (reload_completed)
20198 fputs (", after reload", stderr);
20200 if (!done_p)
20201 fputs (", done_p not set", stderr);
20203 if (default_p)
20204 fputs (", default secondary reload", stderr);
20206 if (sri->icode != CODE_FOR_nothing)
20207 fprintf (stderr, ", reload func = %s, extra cost = %d",
20208 insn_data[sri->icode].name, sri->extra_cost);
20210 else if (sri->extra_cost > 0)
20211 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20213 fputs ("\n", stderr);
20214 debug_rtx (x);
20217 return ret;
20220 /* Better tracing for rs6000_secondary_reload_inner. */
20222 static void
20223 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20224 bool store_p)
20226 rtx set, clobber;
20228 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20230 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20231 store_p ? "store" : "load");
20233 if (store_p)
20234 set = gen_rtx_SET (mem, reg);
20235 else
20236 set = gen_rtx_SET (reg, mem);
20238 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20239 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20242 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20243 ATTRIBUTE_NORETURN;
20245 static void
20246 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20247 bool store_p)
20249 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20250 gcc_unreachable ();
20253 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20254 reload helper functions. These were identified in
20255 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20256 reload, it calls the insns:
20257 reload_<RELOAD:mode>_<P:mptrsize>_store
20258 reload_<RELOAD:mode>_<P:mptrsize>_load
20260 which in turn calls this function, to do whatever is necessary to create
20261 valid addresses. */
20263 void
20264 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20266 int regno = true_regnum (reg);
20267 machine_mode mode = GET_MODE (reg);
20268 addr_mask_type addr_mask;
20269 rtx addr;
20270 rtx new_addr;
20271 rtx op_reg, op0, op1;
20272 rtx and_op;
20273 rtx cc_clobber;
20274 rtvec rv;
20276 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20277 || !base_reg_operand (scratch, GET_MODE (scratch)))
20278 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20280 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20281 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20283 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20284 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20286 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20287 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20289 else
20290 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20292 /* Make sure the mode is valid in this register class. */
20293 if ((addr_mask & RELOAD_REG_VALID) == 0)
20294 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20296 if (TARGET_DEBUG_ADDR)
20297 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20299 new_addr = addr = XEXP (mem, 0);
20300 switch (GET_CODE (addr))
20302 /* Does the register class support auto update forms for this mode? If
20303 not, do the update now. We don't need a scratch register, since the
20304 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20305 case PRE_INC:
20306 case PRE_DEC:
20307 op_reg = XEXP (addr, 0);
20308 if (!base_reg_operand (op_reg, Pmode))
20309 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20311 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20313 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20314 new_addr = op_reg;
20316 break;
20318 case PRE_MODIFY:
20319 op0 = XEXP (addr, 0);
20320 op1 = XEXP (addr, 1);
20321 if (!base_reg_operand (op0, Pmode)
20322 || GET_CODE (op1) != PLUS
20323 || !rtx_equal_p (op0, XEXP (op1, 0)))
20324 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20326 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20328 emit_insn (gen_rtx_SET (op0, op1));
20329 new_addr = reg;
20331 break;
20333 /* Do we need to simulate AND -16 to clear the bottom address bits used
20334 in VMX load/stores? */
20335 case AND:
20336 op0 = XEXP (addr, 0);
20337 op1 = XEXP (addr, 1);
20338 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20340 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20341 op_reg = op0;
20343 else if (GET_CODE (op1) == PLUS)
20345 emit_insn (gen_rtx_SET (scratch, op1));
20346 op_reg = scratch;
20349 else
20350 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20352 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20353 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20354 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20355 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20356 new_addr = scratch;
20358 break;
20360 /* If this is an indirect address, make sure it is a base register. */
20361 case REG:
20362 case SUBREG:
20363 if (!base_reg_operand (addr, GET_MODE (addr)))
20365 emit_insn (gen_rtx_SET (scratch, addr));
20366 new_addr = scratch;
20368 break;
20370 /* If this is an indexed address, make sure the register class can handle
20371 indexed addresses for this mode. */
20372 case PLUS:
20373 op0 = XEXP (addr, 0);
20374 op1 = XEXP (addr, 1);
20375 if (!base_reg_operand (op0, Pmode))
20376 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20378 else if (int_reg_operand (op1, Pmode))
20380 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20382 emit_insn (gen_rtx_SET (scratch, addr));
20383 new_addr = scratch;
20387 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20389 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20390 || !quad_address_p (addr, mode, false))
20392 emit_insn (gen_rtx_SET (scratch, addr));
20393 new_addr = scratch;
20397 /* Make sure the register class can handle offset addresses. */
20398 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20400 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20402 emit_insn (gen_rtx_SET (scratch, addr));
20403 new_addr = scratch;
20407 else
20408 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20410 break;
20412 case LO_SUM:
20413 op0 = XEXP (addr, 0);
20414 op1 = XEXP (addr, 1);
20415 if (!base_reg_operand (op0, Pmode))
20416 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20418 else if (int_reg_operand (op1, Pmode))
20420 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20422 emit_insn (gen_rtx_SET (scratch, addr));
20423 new_addr = scratch;
20427 /* Quad offsets are restricted and can't handle normal addresses. */
20428 else if (mode_supports_vsx_dform_quad (mode))
20430 emit_insn (gen_rtx_SET (scratch, addr));
20431 new_addr = scratch;
20434 /* Make sure the register class can handle offset addresses. */
20435 else if (legitimate_lo_sum_address_p (mode, addr, false))
20437 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20439 emit_insn (gen_rtx_SET (scratch, addr));
20440 new_addr = scratch;
20444 else
20445 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20447 break;
20449 case SYMBOL_REF:
20450 case CONST:
20451 case LABEL_REF:
20452 rs6000_emit_move (scratch, addr, Pmode);
20453 new_addr = scratch;
20454 break;
20456 default:
20457 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20460 /* Adjust the address if it changed. */
20461 if (addr != new_addr)
20463 mem = replace_equiv_address_nv (mem, new_addr);
20464 if (TARGET_DEBUG_ADDR)
20465 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20468 /* Now create the move. */
20469 if (store_p)
20470 emit_insn (gen_rtx_SET (mem, reg));
20471 else
20472 emit_insn (gen_rtx_SET (reg, mem));
20474 return;
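/* Example (added, illustrative): reloading (mem:V4SI (plus (reg 1)
   (const_int 64))) for an Altivec register on a target without quad
   d-form addressing hits the PLUS case above: the register class lacks
   RELOAD_REG_OFFSET, so the sum is computed into SCRATCH and the access
   becomes (mem:V4SI (reg scratch)).  */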
20477 /* Convert reloads involving 64-bit gprs and misaligned offset
20478 addressing, or multiple 32-bit gprs and offsets that are too large,
20479 to use indirect addressing. */
20481 void
20482 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20484 int regno = true_regnum (reg);
20485 enum reg_class rclass;
20486 rtx addr;
20487 rtx scratch_or_premodify = scratch;
20489 if (TARGET_DEBUG_ADDR)
20491 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20492 store_p ? "store" : "load");
20493 fprintf (stderr, "reg:\n");
20494 debug_rtx (reg);
20495 fprintf (stderr, "mem:\n");
20496 debug_rtx (mem);
20497 fprintf (stderr, "scratch:\n");
20498 debug_rtx (scratch);
20501 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20502 gcc_assert (GET_CODE (mem) == MEM);
20503 rclass = REGNO_REG_CLASS (regno);
20504 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20505 addr = XEXP (mem, 0);
20507 if (GET_CODE (addr) == PRE_MODIFY)
20509 gcc_assert (REG_P (XEXP (addr, 0))
20510 && GET_CODE (XEXP (addr, 1)) == PLUS
20511 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20512 scratch_or_premodify = XEXP (addr, 0);
20513 if (!HARD_REGISTER_P (scratch_or_premodify))
20514 /* If we have a pseudo here then reload will have arranged
20515 to have it replaced, but only in the original insn.
20516 Use the replacement here too. */
20517 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20519 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20520 expressions from the original insn, without unsharing them.
20521 Any RTL that points into the original insn will of course
20522 have register replacements applied. That is why we don't
20523 need to look for replacements under the PLUS. */
20524 addr = XEXP (addr, 1);
20526 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20528 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20530 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20532 /* Now create the move. */
20533 if (store_p)
20534 emit_insn (gen_rtx_SET (mem, reg));
20535 else
20536 emit_insn (gen_rtx_SET (reg, mem));
20538 return;
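/* Example (added comment): a 64-bit load from (mem:DI (plus (reg 9)
   (const_int 6))) is misaligned for the DS-form LD instruction, whose
   displacement must be a multiple of 4; the address is moved into SCRATCH
   and the access rewritten as (mem:DI (reg scratch)).  */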
20541 /* Given an rtx X being reloaded into a reg required to be
20542 in class CLASS, return the class of reg to actually use.
20543 In general this is just CLASS; but on some machines
20544 in some cases it is preferable to use a more restrictive class.
20546 On the RS/6000, we have to return NO_REGS when we want to reload a
20547 floating-point CONST_DOUBLE to force it to be copied to memory.
20549 We also don't want to reload integer values into floating-point
20550 registers if we can at all help it. In fact, this can
20551 cause reload to die, if it tries to generate a reload of CTR
20552 into a FP register and discovers it doesn't have the memory location
20553 required.
20555 ??? Would it be a good idea to have reload do the converse, that is
20556 try to reload floating modes into FP registers if possible?
20559 static enum reg_class
20560 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20562 machine_mode mode = GET_MODE (x);
20563 bool is_constant = CONSTANT_P (x);
20565 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20566 reload class for it. */
20567 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20568 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20569 return NO_REGS;
20571 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20572 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20573 return NO_REGS;
20575 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20576 the reloading of address expressions using PLUS into floating point
20577 registers. */
20578 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20580 if (is_constant)
20582 /* Zero is always allowed in all VSX registers. */
20583 if (x == CONST0_RTX (mode))
20584 return rclass;
20586 /* If this is a vector constant that can be formed with a few Altivec
20587 instructions, we want altivec registers. */
20588 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20589 return ALTIVEC_REGS;
20591 /* If this is an integer constant that can easily be loaded into
20592 vector registers, allow it. */
20593 if (CONST_INT_P (x))
20595 HOST_WIDE_INT value = INTVAL (x);
20597 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20598 2.06 can generate it in the Altivec registers with
20599 VSPLTI<x>. */
20600 if (value == -1)
20602 if (TARGET_P8_VECTOR)
20603 return rclass;
20604 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20605 return ALTIVEC_REGS;
20606 else
20607 return NO_REGS;
20610 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20611 a sign extend in the Altivec registers. */
20612 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20613 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20614 return ALTIVEC_REGS;
20617 /* Force constant to memory. */
20618 return NO_REGS;
20621 /* D-form addressing can easily reload the value. */
20622 if (mode_supports_vmx_dform (mode)
20623 || mode_supports_vsx_dform_quad (mode))
20624 return rclass;
20626 /* If this is a scalar floating point value and we don't have D-form
20627 addressing, prefer the traditional floating point registers so that we
20628 can use D-form (register+offset) addressing. */
20629 if (rclass == VSX_REGS
20630 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20631 return FLOAT_REGS;
20633 /* Prefer the Altivec registers if Altivec is handling the vector
20634 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20635 loads. */
20636 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20637 || mode == V1TImode)
20638 return ALTIVEC_REGS;
20640 return rclass;
20643 if (is_constant || GET_CODE (x) == PLUS)
20645 if (reg_class_subset_p (GENERAL_REGS, rclass))
20646 return GENERAL_REGS;
20647 if (reg_class_subset_p (BASE_REGS, rclass))
20648 return BASE_REGS;
20649 return NO_REGS;
20652 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20653 return GENERAL_REGS;
20655 return rclass;
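/* Examples (added, illustrative): with RCLASS == VSX_REGS on a power9
   target, (const_int -1) keeps VSX_REGS (XXLORC can form it in any VSX
   register) and (const_int 100) prefers ALTIVEC_REGS via XXSPLTIB; with
   neither P8 nor P9 vector support, such constants return NO_REGS and are
   forced to memory.  */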
20658 /* Debug version of rs6000_preferred_reload_class. */
20659 static enum reg_class
20660 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20662 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20664 fprintf (stderr,
20665 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20666 "mode = %s, x:\n",
20667 reg_class_names[ret], reg_class_names[rclass],
20668 GET_MODE_NAME (GET_MODE (x)));
20669 debug_rtx (x);
20671 return ret;
20674 /* If we are copying between FP or AltiVec registers and anything else, we need
20675    a memory location.  The exception is when we are targeting ppc64 and the
20676    direct fpr/gpr move instructions are available.  Also, under VSX, you
20677 can copy vector registers from the FP register set to the Altivec register
20678 set and vice versa. */
20680 static bool
20681 rs6000_secondary_memory_needed (machine_mode mode,
20682 reg_class_t from_class,
20683 reg_class_t to_class)
20685 enum rs6000_reg_type from_type, to_type;
20686 bool altivec_p = ((from_class == ALTIVEC_REGS)
20687 || (to_class == ALTIVEC_REGS));
20689   /* If a simple/direct move is available, we don't need secondary memory.  */
20690 from_type = reg_class_to_reg_type[(int)from_class];
20691 to_type = reg_class_to_reg_type[(int)to_class];
20693 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20694 (secondary_reload_info *)0, altivec_p))
20695 return false;
20697 /* If we have a floating point or vector register class, we need to use
20698 memory to transfer the data. */
20699 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20700 return true;
20702 return false;
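/* Example (added comment): an SImode copy between FLOAT_REGS and
   GENERAL_REGS needs a stack slot on older targets, but with ISA 2.07
   direct moves (MTVSRWZ/MFVSRWZ) the simple-move test above makes the
   answer false.  */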
20705 /* Debug version of rs6000_secondary_memory_needed. */
20706 static bool
20707 rs6000_debug_secondary_memory_needed (machine_mode mode,
20708 reg_class_t from_class,
20709 reg_class_t to_class)
20711 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20713 fprintf (stderr,
20714 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20715 "to_class = %s, mode = %s\n",
20716 ret ? "true" : "false",
20717 reg_class_names[from_class],
20718 reg_class_names[to_class],
20719 GET_MODE_NAME (mode));
20721 return ret;
20724 /* Return the register class of a scratch register needed to copy IN into
20725 or out of a register in RCLASS in MODE. If it can be done directly,
20726 NO_REGS is returned. */
20728 static enum reg_class
20729 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20730 rtx in)
20732 int regno;
20734 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20735 #if TARGET_MACHO
20736 && MACHOPIC_INDIRECT
20737 #endif
20740 /* We cannot copy a symbolic operand directly into anything
20741 other than BASE_REGS for TARGET_ELF. So indicate that a
20742 register from BASE_REGS is needed as an intermediate
20743 register.
20745 On Darwin, pic addresses require a load from memory, which
20746 needs a base register. */
20747 if (rclass != BASE_REGS
20748 && (GET_CODE (in) == SYMBOL_REF
20749 || GET_CODE (in) == HIGH
20750 || GET_CODE (in) == LABEL_REF
20751 || GET_CODE (in) == CONST))
20752 return BASE_REGS;
20755 if (GET_CODE (in) == REG)
20757 regno = REGNO (in);
20758 if (regno >= FIRST_PSEUDO_REGISTER)
20760 regno = true_regnum (in);
20761 if (regno >= FIRST_PSEUDO_REGISTER)
20762 regno = -1;
20765 else if (GET_CODE (in) == SUBREG)
20767 regno = true_regnum (in);
20768 if (regno >= FIRST_PSEUDO_REGISTER)
20769 regno = -1;
20771 else
20772 regno = -1;
20774 /* If we have VSX register moves, prefer moving scalar values between
20775 Altivec registers and GPR by going via an FPR (and then via memory)
20776 instead of reloading the secondary memory address for Altivec moves. */
20777 if (TARGET_VSX
20778 && GET_MODE_SIZE (mode) < 16
20779 && !mode_supports_vmx_dform (mode)
20780 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20781 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20782 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20783 && (regno >= 0 && INT_REGNO_P (regno)))))
20784 return FLOAT_REGS;
20786 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20787 into anything. */
20788 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20789 || (regno >= 0 && INT_REGNO_P (regno)))
20790 return NO_REGS;
20792 /* Constants, memory, and VSX registers can go into VSX registers (both the
20793 traditional floating point and the altivec registers). */
20794 if (rclass == VSX_REGS
20795 && (regno == -1 || VSX_REGNO_P (regno)))
20796 return NO_REGS;
20798 /* Constants, memory, and FP registers can go into FP registers. */
20799 if ((regno == -1 || FP_REGNO_P (regno))
20800 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20801 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20803 /* Memory, and AltiVec registers can go into AltiVec registers. */
20804 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20805 && rclass == ALTIVEC_REGS)
20806 return NO_REGS;
20808 /* We can copy among the CR registers. */
20809 if ((rclass == CR_REGS || rclass == CR0_REGS)
20810 && regno >= 0 && CR_REGNO_P (regno))
20811 return NO_REGS;
20813 /* Otherwise, we need GENERAL_REGS. */
20814 return GENERAL_REGS;
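/* Example (added comment): copying a SYMBOL_REF into FLOAT_REGS on an ELF
   target returns BASE_REGS, because the address must first be formed in a
   base register; a GPR-to-GPR copy falls through to the GENERAL_REGS test
   and returns NO_REGS (no scratch needed).  */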
20817 /* Debug version of rs6000_secondary_reload_class. */
20818 static enum reg_class
20819 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20820 machine_mode mode, rtx in)
20822 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20823 fprintf (stderr,
20824 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20825 "mode = %s, input rtx:\n",
20826 reg_class_names[ret], reg_class_names[rclass],
20827 GET_MODE_NAME (mode));
20828 debug_rtx (in);
20830 return ret;
20833 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20835 static bool
20836 rs6000_can_change_mode_class (machine_mode from,
20837 machine_mode to,
20838 reg_class_t rclass)
20840 unsigned from_size = GET_MODE_SIZE (from);
20841 unsigned to_size = GET_MODE_SIZE (to);
20843 if (from_size != to_size)
20845 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20847 if (reg_classes_intersect_p (xclass, rclass))
20849 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20850 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20851 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20852 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20854 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20855 single register under VSX because the scalar part of the register
20856 is in the upper 64-bits, and not the lower 64-bits. Types like
20857 	     TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
20858 IEEE floating point can't overlap, and neither can small
20859 values. */
20861 if (to_float128_vector_p && from_float128_vector_p)
20862 return true;
20864 else if (to_float128_vector_p || from_float128_vector_p)
20865 return false;
20867 /* TDmode in floating-mode registers must always go into a register
20868 pair with the most significant word in the even-numbered register
20869 to match ISA requirements. In little-endian mode, this does not
20870 match subreg numbering, so we cannot allow subregs. */
20871 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20872 return false;
20874 if (from_size < 8 || to_size < 8)
20875 return false;
20877 if (from_size == 8 && (8 * to_nregs) != to_size)
20878 return false;
20880 if (to_size == 8 && (8 * from_nregs) != from_size)
20881 return false;
20883 return true;
20885 else
20886 return true;
20889 /* Since the VSX register set includes traditional floating point registers
20890 and altivec registers, just check for the size being different instead of
20891 trying to check whether the modes are vector modes. Otherwise it won't
20892 allow say DF and DI to change classes. For types like TFmode and TDmode
20893 that take 2 64-bit registers, rather than a single 128-bit register, don't
20894 allow subregs of those types to other 128 bit types. */
20895 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20897 unsigned num_regs = (from_size + 15) / 16;
20898 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20899 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20900 return false;
20902 return (from_size == 8 || from_size == 16);
20905 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20906 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20907 return false;
20909 return true;
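/* Examples (added, illustrative): DFmode <-> DImode subregs are allowed
   (both 8 bytes), and TImode, which occupies two FP registers, may overlap
   8-byte modes; DImode <-> V2DImode is rejected because a V2DI occupies a
   single VSX register whose scalar part lives in the upper 64 bits.  */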
20912 /* Debug version of rs6000_can_change_mode_class. */
20913 static bool
20914 rs6000_debug_can_change_mode_class (machine_mode from,
20915 machine_mode to,
20916 reg_class_t rclass)
20918 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20920 fprintf (stderr,
20921 "rs6000_can_change_mode_class, return %s, from = %s, "
20922 "to = %s, rclass = %s\n",
20923 ret ? "true" : "false",
20924 GET_MODE_NAME (from), GET_MODE_NAME (to),
20925 reg_class_names[rclass]);
20927 return ret;
20930 /* Return a string to do a move operation of 128 bits of data. */
20932 const char *
20933 rs6000_output_move_128bit (rtx operands[])
20935 rtx dest = operands[0];
20936 rtx src = operands[1];
20937 machine_mode mode = GET_MODE (dest);
20938 int dest_regno;
20939 int src_regno;
20940 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20941 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20943 if (REG_P (dest))
20945 dest_regno = REGNO (dest);
20946 dest_gpr_p = INT_REGNO_P (dest_regno);
20947 dest_fp_p = FP_REGNO_P (dest_regno);
20948 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20949 dest_vsx_p = dest_fp_p | dest_vmx_p;
20951 else
20953 dest_regno = -1;
20954 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20957 if (REG_P (src))
20959 src_regno = REGNO (src);
20960 src_gpr_p = INT_REGNO_P (src_regno);
20961 src_fp_p = FP_REGNO_P (src_regno);
20962 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20963 src_vsx_p = src_fp_p | src_vmx_p;
20965 else
20967 src_regno = -1;
20968 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20971 /* Register moves. */
20972 if (dest_regno >= 0 && src_regno >= 0)
20974 if (dest_gpr_p)
20976 if (src_gpr_p)
20977 return "#";
20979 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20980 return (WORDS_BIG_ENDIAN
20981 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20982 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20984 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20985 return "#";
20988 else if (TARGET_VSX && dest_vsx_p)
20990 if (src_vsx_p)
20991 return "xxlor %x0,%x1,%x1";
20993 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20994 return (WORDS_BIG_ENDIAN
20995 ? "mtvsrdd %x0,%1,%L1"
20996 : "mtvsrdd %x0,%L1,%1");
20998 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20999 return "#";
21002 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
21003 return "vor %0,%1,%1";
21005 else if (dest_fp_p && src_fp_p)
21006 return "#";
21009 /* Loads. */
21010 else if (dest_regno >= 0 && MEM_P (src))
21012 if (dest_gpr_p)
21014 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
21015 return "lq %0,%1";
21016 else
21017 return "#";
21020 else if (TARGET_ALTIVEC && dest_vmx_p
21021 && altivec_indexed_or_indirect_operand (src, mode))
21022 return "lvx %0,%y1";
21024 else if (TARGET_VSX && dest_vsx_p)
21026 if (mode_supports_vsx_dform_quad (mode)
21027 && quad_address_p (XEXP (src, 0), mode, true))
21028 return "lxv %x0,%1";
21030 else if (TARGET_P9_VECTOR)
21031 return "lxvx %x0,%y1";
21033 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
21034 return "lxvw4x %x0,%y1";
21036 else
21037 return "lxvd2x %x0,%y1";
21040 else if (TARGET_ALTIVEC && dest_vmx_p)
21041 return "lvx %0,%y1";
21043 else if (dest_fp_p)
21044 return "#";
21047 /* Stores. */
21048 else if (src_regno >= 0 && MEM_P (dest))
21050 if (src_gpr_p)
21052 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
21053 return "stq %1,%0";
21054 else
21055 return "#";
21058 else if (TARGET_ALTIVEC && src_vmx_p
21059 && altivec_indexed_or_indirect_operand (src, mode))
21060 return "stvx %1,%y0";
21062 else if (TARGET_VSX && src_vsx_p)
21064 if (mode_supports_vsx_dform_quad (mode)
21065 && quad_address_p (XEXP (dest, 0), mode, true))
21066 return "stxv %x1,%0";
21068 else if (TARGET_P9_VECTOR)
21069 return "stxvx %x1,%y0";
21071 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
21072 return "stxvw4x %x1,%y0";
21074 else
21075 return "stxvd2x %x1,%y0";
21078 else if (TARGET_ALTIVEC && src_vmx_p)
21079 return "stvx %1,%y0";
21081 else if (src_fp_p)
21082 return "#";
21085 /* Constants. */
21086 else if (dest_regno >= 0
21087 && (GET_CODE (src) == CONST_INT
21088 || GET_CODE (src) == CONST_WIDE_INT
21089 || GET_CODE (src) == CONST_DOUBLE
21090 || GET_CODE (src) == CONST_VECTOR))
21092 if (dest_gpr_p)
21093 return "#";
21095 else if ((dest_vmx_p && TARGET_ALTIVEC)
21096 || (dest_vsx_p && TARGET_VSX))
21097 return output_vec_const_move (operands);
21100 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
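/* Example outputs (added comment): a VSX-to-VSX copy yields
   "xxlor %x0,%x1,%x1"; a GPR-to-GPR copy yields "#", which tells the
   output machinery the insn will be split into word-sized moves after
   reload.  */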
21103 /* Validate a 128-bit move. */
21104 bool
21105 rs6000_move_128bit_ok_p (rtx operands[])
21107 machine_mode mode = GET_MODE (operands[0]);
21108 return (gpc_reg_operand (operands[0], mode)
21109 || gpc_reg_operand (operands[1], mode));
21112 /* Return true if a 128-bit move needs to be split. */
21113 bool
21114 rs6000_split_128bit_ok_p (rtx operands[])
21116 if (!reload_completed)
21117 return false;
21119 if (!gpr_or_gpr_p (operands[0], operands[1]))
21120 return false;
21122 if (quad_load_store_p (operands[0], operands[1]))
21123 return false;
21125 return true;
21129 /* Given a comparison operation, return the bit number in CCR to test. We
21130 know this is a valid comparison.
21132 SCC_P is 1 if this is for an scc. That means that %D will have been
21133 used instead of %C, so the bits will be in different places.
21135 Return -1 if OP isn't a valid comparison for some reason. */
21138 ccr_bit (rtx op, int scc_p)
21140 enum rtx_code code = GET_CODE (op);
21141 machine_mode cc_mode;
21142 int cc_regnum;
21143 int base_bit;
21144 rtx reg;
21146 if (!COMPARISON_P (op))
21147 return -1;
21149 reg = XEXP (op, 0);
21151 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
21153 cc_mode = GET_MODE (reg);
21154 cc_regnum = REGNO (reg);
21155 base_bit = 4 * (cc_regnum - CR0_REGNO);
21157 validate_condition_mode (code, cc_mode);
21159 /* When generating a sCOND operation, only positive conditions are
21160 allowed. */
21161 gcc_assert (!scc_p
21162 || code == EQ || code == GT || code == LT || code == UNORDERED
21163 || code == GTU || code == LTU);
21165 switch (code)
21167 case NE:
21168 return scc_p ? base_bit + 3 : base_bit + 2;
21169 case EQ:
21170 return base_bit + 2;
21171 case GT: case GTU: case UNLE:
21172 return base_bit + 1;
21173 case LT: case LTU: case UNGE:
21174 return base_bit;
21175 case ORDERED: case UNORDERED:
21176 return base_bit + 3;
21178 case GE: case GEU:
21179 /* If scc, we will have done a cror to put the bit in the
21180 unordered position. So test that bit. For integer, this is ! LT
21181 unless this is an scc insn. */
21182 return scc_p ? base_bit + 3 : base_bit;
21184 case LE: case LEU:
21185 return scc_p ? base_bit + 3 : base_bit + 1;
21187 default:
21188 gcc_unreachable ();
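/* Worked example (added comment): a GT test against CR field 2 gives
   base_bit = 4 * 2 = 8 and returns base_bit + 1 = 9; a GE test returns
   bit 8 normally, or bit 11 (the "unordered" slot filled by a prior CROR)
   for an scc.  */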
21192 /* Return the GOT register. */
21195 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21197 /* The second flow pass currently (June 1999) can't update
21198 regs_ever_live without disturbing other parts of the compiler, so
21199 update it here to make the prolog/epilogue code happy. */
21200 if (!can_create_pseudo_p ()
21201 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21202 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21204 crtl->uses_pic_offset_table = 1;
21206 return pic_offset_table_rtx;
21209 static rs6000_stack_t stack_info;
21211 /* Function to init struct machine_function.
21212 This will be called, via a pointer variable,
21213 from push_function_context. */
21215 static struct machine_function *
21216 rs6000_init_machine_status (void)
21218 stack_info.reload_completed = 0;
21219 return ggc_cleared_alloc<machine_function> ();
21222 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21224 /* Write out a function code label. */
21226 void
21227 rs6000_output_function_entry (FILE *file, const char *fname)
21229 if (fname[0] != '.')
21231 switch (DEFAULT_ABI)
21233 default:
21234 gcc_unreachable ();
21236 case ABI_AIX:
21237 if (DOT_SYMBOLS)
21238 putc ('.', file);
21239 else
21240 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21241 break;
21243 case ABI_ELFv2:
21244 case ABI_V4:
21245 case ABI_DARWIN:
21246 break;
21250 RS6000_OUTPUT_BASENAME (file, fname);
21253 /* Print an operand. Recognize special options, documented below. */
21255 #if TARGET_ELF
21256 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21257 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21258 #else
21259 #define SMALL_DATA_RELOC "sda21"
21260 #define SMALL_DATA_REG 0
21261 #endif
21263 void
21264 print_operand (FILE *file, rtx x, int code)
21266 int i;
21267 unsigned HOST_WIDE_INT uval;
21269 switch (code)
21271 /* %a is output_address. */
21273 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21274 output_operand. */
21276 case 'D':
21277 /* Like 'J' but get to the GT bit only. */
21278 gcc_assert (REG_P (x));
21280 /* Bit 1 is GT bit. */
21281 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21283 /* Add one for shift count in rlinm for scc. */
21284 fprintf (file, "%d", i + 1);
21285 return;
21287 case 'e':
21288 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21289 if (! INT_P (x))
21291 output_operand_lossage ("invalid %%e value");
21292 return;
21295 uval = INTVAL (x);
21296 if ((uval & 0xffff) == 0 && uval != 0)
21297 putc ('s', file);
21298 return;
21300 case 'E':
21301       /* X is a CR register.  Print the number of the EQ bit of the CR.  */
21302 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21303 output_operand_lossage ("invalid %%E value");
21304 else
21305 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21306 return;
21308 case 'f':
21309 /* X is a CR register. Print the shift count needed to move it
21310 to the high-order four bits. */
21311 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21312 output_operand_lossage ("invalid %%f value");
21313 else
21314 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21315 return;
21317 case 'F':
21318 /* Similar, but print the count for the rotate in the opposite
21319 direction. */
21320 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21321 output_operand_lossage ("invalid %%F value");
21322 else
21323 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21324 return;
21326 case 'G':
21327 /* X is a constant integer. If it is negative, print "m",
21328 otherwise print "z". This is to make an aze or ame insn. */
21329 if (GET_CODE (x) != CONST_INT)
21330 output_operand_lossage ("invalid %%G value");
21331 else if (INTVAL (x) >= 0)
21332 putc ('z', file);
21333 else
21334 putc ('m', file);
21335 return;
21337 case 'h':
21338 /* If constant, output low-order five bits. Otherwise, write
21339 normally. */
21340 if (INT_P (x))
21341 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21342 else
21343 print_operand (file, x, 0);
21344 return;
21346 case 'H':
21347 /* If constant, output low-order six bits. Otherwise, write
21348 normally. */
21349 if (INT_P (x))
21350 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21351 else
21352 print_operand (file, x, 0);
21353 return;
21355 case 'I':
21356 /* Print `i' if this is a constant, else nothing. */
21357 if (INT_P (x))
21358 putc ('i', file);
21359 return;
21361 case 'j':
21362 /* Write the bit number in CCR for jump. */
21363 i = ccr_bit (x, 0);
21364 if (i == -1)
21365 output_operand_lossage ("invalid %%j code");
21366 else
21367 fprintf (file, "%d", i);
21368 return;
21370 case 'J':
21371 /* Similar, but add one for shift count in rlinm for scc and pass
21372 scc flag to `ccr_bit'. */
21373 i = ccr_bit (x, 1);
21374 if (i == -1)
21375 output_operand_lossage ("invalid %%J code");
21376 else
21377 /* If we want bit 31, write a shift count of zero, not 32. */
21378 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21379 return;
21381 case 'k':
21382 /* X must be a constant. Write the 1's complement of the
21383 constant. */
21384 if (! INT_P (x))
21385 output_operand_lossage ("invalid %%k value");
21386 else
21387 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21388 return;
21390 case 'K':
21391 /* X must be a symbolic constant on ELF. Write an
21392 expression suitable for an 'addi' that adds in the low 16
21393 bits of the MEM. */
21394 if (GET_CODE (x) == CONST)
21396 if (GET_CODE (XEXP (x, 0)) != PLUS
21397 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21398 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21399 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21400 output_operand_lossage ("invalid %%K value");
21402 print_operand_address (file, x);
21403 fputs ("@l", file);
21404 return;
21406 /* %l is output_asm_label. */
21408 case 'L':
21409 /* Write second word of DImode or DFmode reference. Works on register
21410 or non-indexed memory only. */
21411 if (REG_P (x))
21412 fputs (reg_names[REGNO (x) + 1], file);
21413 else if (MEM_P (x))
21415 machine_mode mode = GET_MODE (x);
21416 /* Handle possible auto-increment. Since it is pre-increment and
21417 we have already done it, we can just use an offset of one word. */
21418 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21419 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21420 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21421 UNITS_PER_WORD));
21422 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21423 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21424 UNITS_PER_WORD));
21425 else
21426 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21427 UNITS_PER_WORD),
21428 0));
21430 if (small_data_operand (x, GET_MODE (x)))
21431 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21432 reg_names[SMALL_DATA_REG]);
21434 return;
21436 case 'N': /* Unused */
21437 /* Write the number of elements in the vector times 4. */
21438 if (GET_CODE (x) != PARALLEL)
21439 output_operand_lossage ("invalid %%N value");
21440 else
21441 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21442 return;
21444 case 'O': /* Unused */
21445 /* Similar, but subtract 1 first. */
21446 if (GET_CODE (x) != PARALLEL)
21447 output_operand_lossage ("invalid %%O value");
21448 else
21449 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21450 return;
21452 case 'p':
21453 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21454 if (! INT_P (x)
21455 || INTVAL (x) < 0
21456 || (i = exact_log2 (INTVAL (x))) < 0)
21457 output_operand_lossage ("invalid %%p value");
21458 else
21459 fprintf (file, "%d", i);
21460 return;
21462 case 'P':
21463 /* The operand must be an indirect memory reference. The result
21464 is the register name. */
21465 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21466 || REGNO (XEXP (x, 0)) >= 32)
21467 output_operand_lossage ("invalid %%P value");
21468 else
21469 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21470 return;
21472 case 'q':
21473 /* This outputs the logical code corresponding to a boolean
21474 expression. The expression may have one or both operands
21475 negated (if one, only the first one). For condition register
21476 logical operations, it will also treat the negated
21477 CR codes as NOTs, but not handle NOTs of them. */
21479 const char *const *t = 0;
21480 const char *s;
21481 enum rtx_code code = GET_CODE (x);
21482 static const char * const tbl[3][3] = {
21483 { "and", "andc", "nor" },
21484 { "or", "orc", "nand" },
21485 { "xor", "eqv", "xor" } };
21487 if (code == AND)
21488 t = tbl[0];
21489 else if (code == IOR)
21490 t = tbl[1];
21491 else if (code == XOR)
21492 t = tbl[2];
21493 else
21494 output_operand_lossage ("invalid %%q value");
21496 if (GET_CODE (XEXP (x, 0)) != NOT)
21497 s = t[0];
21498 else
21500 if (GET_CODE (XEXP (x, 1)) == NOT)
21501 s = t[2];
21502 else
21503 s = t[1];
21506 fputs (s, file);
21508 return;
21510 case 'Q':
21511 if (! TARGET_MFCRF)
21512 return;
21513 fputc (',', file);
21514 /* FALLTHRU */
21516 case 'R':
21517 /* X is a CR register. Print the mask for `mtcrf'. */
21518 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21519 output_operand_lossage ("invalid %%R value");
21520 else
21521 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21522 return;
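/* For illustration: CR field 2 yields 128 >> 2 == 32, so '%R' prints 32,
   and an "mtcrf 32,rS" then updates only CR2.  */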
21524 case 's':
21525 /* Low 5 bits of 32 - value */
21526 if (! INT_P (x))
21527 output_operand_lossage ("invalid %%s value");
21528 else
21529 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21530 return;
21532 case 't':
21533 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21534 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21536 /* Bit 3 is OV bit. */
21537 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21539 /* If we want bit 31, write a shift count of zero, not 32. */
21540 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21541 return;
21543 case 'T':
21544 /* Print the symbolic name of a branch target register. */
21545 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21546 && REGNO (x) != CTR_REGNO))
21547 output_operand_lossage ("invalid %%T value");
21548 else if (REGNO (x) == LR_REGNO)
21549 fputs ("lr", file);
21550 else
21551 fputs ("ctr", file);
21552 return;
21554 case 'u':
21555 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21556 for use in unsigned operand. */
21557 if (! INT_P (x))
21559 output_operand_lossage ("invalid %%u value");
21560 return;
21563 uval = INTVAL (x);
21564 if ((uval & 0xffff) == 0)
21565 uval >>= 16;
21567 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21568 return;
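/* For illustration: an INTVAL of 0x50000 has a zero low half, so '%u'
   shifts and prints 0x5 (the high-order 16 bits); 0x1234 has a non-zero
   low half and prints as 0x1234 unchanged.  */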
21570 case 'v':
21571 /* High-order 16 bits of constant for use in signed operand. */
21572 if (! INT_P (x))
21573 output_operand_lossage ("invalid %%v value");
21574 else
21575 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21576 (INTVAL (x) >> 16) & 0xffff);
21577 return;
21579 case 'U':
21580 /* Print `u' if this has an auto-increment or auto-decrement. */
21581 if (MEM_P (x)
21582 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21583 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21584 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21585 putc ('u', file);
21586 return;
21588 case 'V':
21589 /* Print the trap code for this operand. */
21590 switch (GET_CODE (x))
21592 case EQ:
21593 fputs ("eq", file); /* 4 */
21594 break;
21595 case NE:
21596 fputs ("ne", file); /* 24 */
21597 break;
21598 case LT:
21599 fputs ("lt", file); /* 16 */
21600 break;
21601 case LE:
21602 fputs ("le", file); /* 20 */
21603 break;
21604 case GT:
21605 fputs ("gt", file); /* 8 */
21606 break;
21607 case GE:
21608 fputs ("ge", file); /* 12 */
21609 break;
21610 case LTU:
21611 fputs ("llt", file); /* 2 */
21612 break;
21613 case LEU:
21614 fputs ("lle", file); /* 6 */
21615 break;
21616 case GTU:
21617 fputs ("lgt", file); /* 1 */
21618 break;
21619 case GEU:
21620 fputs ("lge", file); /* 5 */
21621 break;
21622 default:
21623 gcc_unreachable ();
21625 break;
21627 case 'w':
21628 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21629 normally. */
21630 if (INT_P (x))
21631 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21632 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21633 else
21634 print_operand (file, x, 0);
21635 return;
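/* For illustration: the XOR/subtract idiom sign-extends the low half, so
   an INTVAL of 0x2ffff prints as -1, since
   (0xffff ^ 0x8000) - 0x8000 == -1.  */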
21637 case 'x':
21638 /* X is a FPR or Altivec register used in a VSX context. */
21639 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21640 output_operand_lossage ("invalid %%x value");
21641 else
21643 int reg = REGNO (x);
21644 int vsx_reg = (FP_REGNO_P (reg)
21645 ? reg - 32
21646 : reg - FIRST_ALTIVEC_REGNO + 32);
21648 #ifdef TARGET_REGNAMES
21649 if (TARGET_REGNAMES)
21650 fprintf (file, "%%vs%d", vsx_reg);
21651 else
21652 #endif
21653 fprintf (file, "%d", vsx_reg);
21655 return;
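/* For illustration: FPR f1 (internal regno 33) prints as VSX register 1,
   while Altivec v5 prints as VSX register 37, matching the VSX numbering
   in which vs0-vs31 overlay the FPRs and vs32-vs63 overlay the VRs.  */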
21657 case 'X':
21658 if (MEM_P (x)
21659 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21660 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21661 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21662 putc ('x', file);
21663 return;
21665 case 'Y':
21666 /* Like 'L', for third word of TImode/PTImode */
21667 if (REG_P (x))
21668 fputs (reg_names[REGNO (x) + 2], file);
21669 else if (MEM_P (x))
21671 machine_mode mode = GET_MODE (x);
21672 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21673 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21674 output_address (mode, plus_constant (Pmode,
21675 XEXP (XEXP (x, 0), 0), 8));
21676 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21677 output_address (mode, plus_constant (Pmode,
21678 XEXP (XEXP (x, 0), 0), 8));
21679 else
21680 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21681 if (small_data_operand (x, GET_MODE (x)))
21682 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21683 reg_names[SMALL_DATA_REG]);
21685 return;
21687 case 'z':
21688 /* X is a SYMBOL_REF. Write out the name preceded by a
21689 period and without any trailing data in brackets. Used for function
21690 names. If we are configured for System V (or the embedded ABI) on
21691 the PowerPC, do not emit the period, since those systems do not use
21692 TOCs and the like. */
21693 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21695 /* For macho, check to see if we need a stub. */
21696 if (TARGET_MACHO)
21698 const char *name = XSTR (x, 0);
21699 #if TARGET_MACHO
21700 if (darwin_emit_branch_islands
21701 && MACHOPIC_INDIRECT
21702 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21703 name = machopic_indirection_name (x, /*stub_p=*/true);
21704 #endif
21705 assemble_name (file, name);
21707 else if (!DOT_SYMBOLS)
21708 assemble_name (file, XSTR (x, 0));
21709 else
21710 rs6000_output_function_entry (file, XSTR (x, 0));
21711 return;
21713 case 'Z':
21714 /* Like 'L', for last word of TImode/PTImode. */
21715 if (REG_P (x))
21716 fputs (reg_names[REGNO (x) + 3], file);
21717 else if (MEM_P (x))
21719 machine_mode mode = GET_MODE (x);
21720 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21721 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21722 output_address (mode, plus_constant (Pmode,
21723 XEXP (XEXP (x, 0), 0), 12));
21724 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21725 output_address (mode, plus_constant (Pmode,
21726 XEXP (XEXP (x, 0), 0), 12));
21727 else
21728 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21729 if (small_data_operand (x, GET_MODE (x)))
21730 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21731 reg_names[SMALL_DATA_REG]);
21733 return;
21735 /* Print AltiVec memory operand. */
21736 case 'y':
21738 rtx tmp;
21740 gcc_assert (MEM_P (x));
21742 tmp = XEXP (x, 0);
21744 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21745 && GET_CODE (tmp) == AND
21746 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21747 && INTVAL (XEXP (tmp, 1)) == -16)
21748 tmp = XEXP (tmp, 0);
21749 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21750 && GET_CODE (tmp) == PRE_MODIFY)
21751 tmp = XEXP (tmp, 1);
21752 if (REG_P (tmp))
21753 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21754 else
21756 if (GET_CODE (tmp) != PLUS
21757 || !REG_P (XEXP (tmp, 0))
21758 || !REG_P (XEXP (tmp, 1)))
21760 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21761 break;
21764 if (REGNO (XEXP (tmp, 0)) == 0)
21765 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21766 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21767 else
21768 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21769 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21771 break;
21774 case 0:
21775 if (REG_P (x))
21776 fprintf (file, "%s", reg_names[REGNO (x)]);
21777 else if (MEM_P (x))
21779 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21780 know the width from the mode. */
21781 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21782 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21783 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21784 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21785 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21786 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21787 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21788 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21789 else
21790 output_address (GET_MODE (x), XEXP (x, 0));
21792 else
21794 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21795 /* This hack along with a corresponding hack in
21796 rs6000_output_addr_const_extra arranges to output addends
21797 where the assembler expects to find them. eg.
21798 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21799 without this hack would be output as "x@toc+4". We
21800 want "x+4@toc". */
21801 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21802 else
21803 output_addr_const (file, x);
21805 return;
21807 case '&':
21808 if (const char *name = get_some_local_dynamic_name ())
21809 assemble_name (file, name);
21810 else
21811 output_operand_lossage ("'%%&' used without any "
21812 "local dynamic TLS references");
21813 return;
21815 default:
21816 output_operand_lossage ("invalid %%xn code");
21820 /* Print the address of an operand. */
21822 void
21823 print_operand_address (FILE *file, rtx x)
21825 if (REG_P (x))
21826 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21827 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21828 || GET_CODE (x) == LABEL_REF)
21830 output_addr_const (file, x);
21831 if (small_data_operand (x, GET_MODE (x)))
21832 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21833 reg_names[SMALL_DATA_REG]);
21834 else
21835 gcc_assert (!TARGET_TOC);
21837 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21838 && REG_P (XEXP (x, 1)))
21840 if (REGNO (XEXP (x, 0)) == 0)
21841 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21842 reg_names[ REGNO (XEXP (x, 0)) ]);
21843 else
21844 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21845 reg_names[ REGNO (XEXP (x, 1)) ]);
21847 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21848 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21849 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21850 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21851 #if TARGET_MACHO
21852 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21853 && CONSTANT_P (XEXP (x, 1)))
21855 fprintf (file, "lo16(");
21856 output_addr_const (file, XEXP (x, 1));
21857 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21859 #endif
21860 #if TARGET_ELF
21861 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21862 && CONSTANT_P (XEXP (x, 1)))
21864 output_addr_const (file, XEXP (x, 1));
21865 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21867 #endif
21868 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21870 /* This hack along with a corresponding hack in
21871 rs6000_output_addr_const_extra arranges to output addends
21872 where the assembler expects to find them. eg.
21873 (lo_sum (reg 9)
21874 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21875 without this hack would be output as "x@toc+8@l(9)". We
21876 want "x+8@toc@l(9)". */
21877 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21878 if (GET_CODE (x) == LO_SUM)
21879 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21880 else
21881 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21883 else
21884 gcc_unreachable ();
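/* Illustrative outputs of print_operand_address (not exhaustive): a plain
   register yields "0(r9)"; a reg+reg sum yields "r9,r0", with r0 forced
   into the second slot so it is never misread as a base; a reg+constant
   sum yields "16(r9)"; and an ELF lo_sum yields "sym@l(r9)".  */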
21887 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21889 static bool
21890 rs6000_output_addr_const_extra (FILE *file, rtx x)
21892 if (GET_CODE (x) == UNSPEC)
21893 switch (XINT (x, 1))
21895 case UNSPEC_TOCREL:
21896 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21897 && REG_P (XVECEXP (x, 0, 1))
21898 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21899 output_addr_const (file, XVECEXP (x, 0, 0));
21900 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21902 if (INTVAL (tocrel_offset_oac) >= 0)
21903 fprintf (file, "+");
21904 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21906 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21908 putc ('-', file);
21909 assemble_name (file, toc_label_name);
21910 need_toc_init = 1;
21912 else if (TARGET_ELF)
21913 fputs ("@toc", file);
21914 return true;
21916 #if TARGET_MACHO
21917 case UNSPEC_MACHOPIC_OFFSET:
21918 output_addr_const (file, XVECEXP (x, 0, 0));
21919 putc ('-', file);
21920 machopic_output_function_base_name (file);
21921 return true;
21922 #endif
21924 return false;
21927 /* Target hook for assembling integer objects. The PowerPC version has
21928 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21929 is defined. It also needs to handle DI-mode objects on 64-bit
21930 targets. */
21932 static bool
21933 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21935 #ifdef RELOCATABLE_NEEDS_FIXUP
21936 /* Special handling for SI values. */
21937 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21939 static int recurse = 0;
21941 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21942 the .fixup section. Since the TOC section is already relocated, we
21943 don't need to mark it here. We used to skip the text section, but it
21944 should never be valid for relocated addresses to be placed in the text
21945 section. */
21946 if (DEFAULT_ABI == ABI_V4
21947 && (TARGET_RELOCATABLE || flag_pic > 1)
21948 && in_section != toc_section
21949 && !recurse
21950 && !CONST_SCALAR_INT_P (x)
21951 && CONSTANT_P (x))
21953 char buf[256];
21955 recurse = 1;
21956 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21957 fixuplabelno++;
21958 ASM_OUTPUT_LABEL (asm_out_file, buf);
21959 fprintf (asm_out_file, "\t.long\t(");
21960 output_addr_const (asm_out_file, x);
21961 fprintf (asm_out_file, ")@fixup\n");
21962 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21963 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21964 fprintf (asm_out_file, "\t.long\t");
21965 assemble_name (asm_out_file, buf);
21966 fprintf (asm_out_file, "\n\t.previous\n");
21967 recurse = 0;
21968 return true;
21970 /* Remove initial .'s to turn a -mcall-aixdesc function
21971 address into the address of the descriptor, not the function
21972 itself. */
21973 else if (GET_CODE (x) == SYMBOL_REF
21974 && XSTR (x, 0)[0] == '.'
21975 && DEFAULT_ABI == ABI_AIX)
21977 const char *name = XSTR (x, 0);
21978 while (*name == '.')
21979 name++;
21981 fprintf (asm_out_file, "\t.long\t%s\n", name);
21982 return true;
21985 #endif /* RELOCATABLE_NEEDS_FIXUP */
21986 return default_assemble_integer (x, size, aligned_p);
21989 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21990 /* Emit an assembler directive to set symbol visibility for DECL to
21991 VISIBILITY_TYPE. */
21993 static void
21994 rs6000_assemble_visibility (tree decl, int vis)
21996 if (TARGET_XCOFF)
21997 return;
21999 /* Functions need to have their entry point symbol visibility set as
22000 well as their descriptor symbol visibility. */
22001 if (DEFAULT_ABI == ABI_AIX
22002 && DOT_SYMBOLS
22003 && TREE_CODE (decl) == FUNCTION_DECL)
22005 static const char * const visibility_types[] = {
22006 NULL, "protected", "hidden", "internal"
22009 const char *name, *type;
22011 name = ((* targetm.strip_name_encoding)
22012 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
22013 type = visibility_types[vis];
22015 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
22016 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
22018 else
22019 default_assemble_visibility (decl, vis);
22021 #endif
22023 enum rtx_code
22024 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
22026 /* Reversal of FP compares takes care -- an ordered compare
22027 becomes an unordered compare and vice versa. */
22028 if (mode == CCFPmode
22029 && (!flag_finite_math_only
22030 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
22031 || code == UNEQ || code == LTGT))
22032 return reverse_condition_maybe_unordered (code);
22033 else
22034 return reverse_condition (code);
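/* For illustration: reversing GT on a CCFPmode compare that may see NaNs
   yields UNLE (unordered or less-or-equal) rather than LE, so that NaN
   operands take the reversed branch; an integer GT simply reverses
   to LE.  */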
22037 /* Generate a compare for CODE. Return a brand-new rtx that
22038 represents the result of the compare. */
22040 static rtx
22041 rs6000_generate_compare (rtx cmp, machine_mode mode)
22043 machine_mode comp_mode;
22044 rtx compare_result;
22045 enum rtx_code code = GET_CODE (cmp);
22046 rtx op0 = XEXP (cmp, 0);
22047 rtx op1 = XEXP (cmp, 1);
22049 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22050 comp_mode = CCmode;
22051 else if (FLOAT_MODE_P (mode))
22052 comp_mode = CCFPmode;
22053 else if (code == GTU || code == LTU
22054 || code == GEU || code == LEU)
22055 comp_mode = CCUNSmode;
22056 else if ((code == EQ || code == NE)
22057 && unsigned_reg_p (op0)
22058 && (unsigned_reg_p (op1)
22059 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
22060 /* These are unsigned values, perhaps there will be a later
22061 ordering compare that can be shared with this one. */
22062 comp_mode = CCUNSmode;
22063 else
22064 comp_mode = CCmode;
22066 /* If we have an unsigned compare, make sure we don't have a signed value as
22067 an immediate. */
22068 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
22069 && INTVAL (op1) < 0)
22071 op0 = copy_rtx_if_shared (op0);
22072 op1 = force_reg (GET_MODE (op0), op1);
22073 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
22076 /* First, the compare. */
22077 compare_result = gen_reg_rtx (comp_mode);
22079 /* IEEE 128-bit support in VSX registers when we do not have hardware
22080 support. */
22081 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22083 rtx libfunc = NULL_RTX;
22084 bool check_nan = false;
22085 rtx dest;
22087 switch (code)
22089 case EQ:
22090 case NE:
22091 libfunc = optab_libfunc (eq_optab, mode);
22092 break;
22094 case GT:
22095 case GE:
22096 libfunc = optab_libfunc (ge_optab, mode);
22097 break;
22099 case LT:
22100 case LE:
22101 libfunc = optab_libfunc (le_optab, mode);
22102 break;
22104 case UNORDERED:
22105 case ORDERED:
22106 libfunc = optab_libfunc (unord_optab, mode);
22107 code = (code == UNORDERED) ? NE : EQ;
22108 break;
22110 case UNGE:
22111 case UNGT:
22112 check_nan = true;
22113 libfunc = optab_libfunc (ge_optab, mode);
22114 code = (code == UNGE) ? GE : GT;
22115 break;
22117 case UNLE:
22118 case UNLT:
22119 check_nan = true;
22120 libfunc = optab_libfunc (le_optab, mode);
22121 code = (code == UNLE) ? LE : LT;
22122 break;
22124 case UNEQ:
22125 case LTGT:
22126 check_nan = true;
22127 libfunc = optab_libfunc (eq_optab, mode);
22128 code = (code == UNEQ) ? EQ : NE;
22129 break;
22131 default:
22132 gcc_unreachable ();
22135 gcc_assert (libfunc);
22137 if (!check_nan)
22138 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22139 SImode, op0, mode, op1, mode);
22141 /* The library signals an exception for signalling NaNs, so we need to
22142 handle isgreater, etc. by first checking isordered. */
22143 else
22145 rtx ne_rtx, normal_dest, unord_dest;
22146 rtx unord_func = optab_libfunc (unord_optab, mode);
22147 rtx join_label = gen_label_rtx ();
22148 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22149 rtx unord_cmp = gen_reg_rtx (comp_mode);
22152 /* Test for either value being a NaN. */
22153 gcc_assert (unord_func);
22154 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22155 SImode, op0, mode, op1, mode);
22157 /* Set the result to 1 if either value is a NaN, and jump to the join
22158 label. */
22159 dest = gen_reg_rtx (SImode);
22160 emit_move_insn (dest, const1_rtx);
22161 emit_insn (gen_rtx_SET (unord_cmp,
22162 gen_rtx_COMPARE (comp_mode, unord_dest,
22163 const0_rtx)));
22165 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22166 emit_jump_insn (gen_rtx_SET (pc_rtx,
22167 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22168 join_ref,
22169 pc_rtx)));
22171 /* Do the normal comparison, knowing that the values are not
22172 NaNs. */
22173 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22174 SImode, op0, mode, op1, mode);
22176 emit_insn (gen_cstoresi4 (dest,
22177 gen_rtx_fmt_ee (code, SImode, normal_dest,
22178 const0_rtx),
22179 normal_dest, const0_rtx));
22181 /* Join NaN and non-NaN paths. Compare dest against 0. */
22182 emit_label (join_label);
22183 code = NE;
22186 emit_insn (gen_rtx_SET (compare_result,
22187 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22190 else
22192 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22193 CLOBBERs to match cmptf_internal2 pattern. */
22194 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22195 && FLOAT128_IBM_P (GET_MODE (op0))
22196 && TARGET_HARD_FLOAT)
22197 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22198 gen_rtvec (10,
22199 gen_rtx_SET (compare_result,
22200 gen_rtx_COMPARE (comp_mode, op0, op1)),
22201 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22202 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22203 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22204 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22205 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22206 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22207 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22208 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22209 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22210 else if (GET_CODE (op1) == UNSPEC
22211 && XINT (op1, 1) == UNSPEC_SP_TEST)
22213 rtx op1b = XVECEXP (op1, 0, 0);
22214 comp_mode = CCEQmode;
22215 compare_result = gen_reg_rtx (CCEQmode);
22216 if (TARGET_64BIT)
22217 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22218 else
22219 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22221 else
22222 emit_insn (gen_rtx_SET (compare_result,
22223 gen_rtx_COMPARE (comp_mode, op0, op1)));
22226 /* Some kinds of FP comparisons need an OR operation;
22227 under flag_finite_math_only we don't bother. */
22228 if (FLOAT_MODE_P (mode)
22229 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22230 && !flag_finite_math_only
22231 && (code == LE || code == GE
22232 || code == UNEQ || code == LTGT
22233 || code == UNGT || code == UNLT))
22235 enum rtx_code or1, or2;
22236 rtx or1_rtx, or2_rtx, compare2_rtx;
22237 rtx or_result = gen_reg_rtx (CCEQmode);
22239 switch (code)
22241 case LE: or1 = LT; or2 = EQ; break;
22242 case GE: or1 = GT; or2 = EQ; break;
22243 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22244 case LTGT: or1 = LT; or2 = GT; break;
22245 case UNGT: or1 = UNORDERED; or2 = GT; break;
22246 case UNLT: or1 = UNORDERED; or2 = LT; break;
22247 default: gcc_unreachable ();
22249 validate_condition_mode (or1, comp_mode);
22250 validate_condition_mode (or2, comp_mode);
22251 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22252 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22253 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22254 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22255 const_true_rtx);
22256 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22258 compare_result = or_result;
22259 code = EQ;
22262 validate_condition_mode (code, GET_MODE (compare_result));
22264 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
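/* For illustration: a floating-point LE cannot be tested in a single CR
   bit, so the code above folds (LT | EQ) into a CCEQmode register; the
   machine description typically assembles that IOR as a cror of the LT
   and EQ bits, and the caller then branches or isels on the single EQ
   result.  */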
22268 /* Return the diagnostic message string if the binary operation OP is
22269 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22271 static const char*
22272 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22273 const_tree type1,
22274 const_tree type2)
22276 machine_mode mode1 = TYPE_MODE (type1);
22277 machine_mode mode2 = TYPE_MODE (type2);
22279 /* For complex modes, use the inner type. */
22280 if (COMPLEX_MODE_P (mode1))
22281 mode1 = GET_MODE_INNER (mode1);
22283 if (COMPLEX_MODE_P (mode2))
22284 mode2 = GET_MODE_INNER (mode2);
22286 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22287 double to intermix unless -mfloat128-convert. */
22288 if (mode1 == mode2)
22289 return NULL;
22291 if (!TARGET_FLOAT128_CVT)
22293 if ((mode1 == KFmode && mode2 == IFmode)
22294 || (mode1 == IFmode && mode2 == KFmode))
22295 return N_("__float128 and __ibm128 cannot be used in the same "
22296 "expression");
22298 if (TARGET_IEEEQUAD
22299 && ((mode1 == IFmode && mode2 == TFmode)
22300 || (mode1 == TFmode && mode2 == IFmode)))
22301 return N_("__ibm128 and long double cannot be used in the same "
22302 "expression");
22304 if (!TARGET_IEEEQUAD
22305 && ((mode1 == KFmode && mode2 == TFmode)
22306 || (mode1 == TFmode && mode2 == KFmode)))
22307 return N_("__float128 and long double cannot be used in the same "
22308 "expression");
22311 return NULL;
22315 /* Expand floating point conversion to/from __float128 and __ibm128. */
22317 void
22318 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22320 machine_mode dest_mode = GET_MODE (dest);
22321 machine_mode src_mode = GET_MODE (src);
22322 convert_optab cvt = unknown_optab;
22323 bool do_move = false;
22324 rtx libfunc = NULL_RTX;
22325 rtx dest2;
22326 typedef rtx (*rtx_2func_t) (rtx, rtx);
22327 rtx_2func_t hw_convert = (rtx_2func_t)0;
22328 size_t kf_or_tf;
22330 struct hw_conv_t {
22331 rtx_2func_t from_df;
22332 rtx_2func_t from_sf;
22333 rtx_2func_t from_si_sign;
22334 rtx_2func_t from_si_uns;
22335 rtx_2func_t from_di_sign;
22336 rtx_2func_t from_di_uns;
22337 rtx_2func_t to_df;
22338 rtx_2func_t to_sf;
22339 rtx_2func_t to_si_sign;
22340 rtx_2func_t to_si_uns;
22341 rtx_2func_t to_di_sign;
22342 rtx_2func_t to_di_uns;
22343 } hw_conversions[2] = {
22344 /* conversions to/from KFmode */
22346 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22347 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22348 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22349 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22350 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22351 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22352 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22353 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22354 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22355 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22356 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22357 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22360 /* conversions to/from TFmode */
22362 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22363 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22364 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22365 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22366 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22367 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22368 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22369 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22370 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22371 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22372 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22373 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22377 if (dest_mode == src_mode)
22378 gcc_unreachable ();
22380 /* Eliminate memory operations. */
22381 if (MEM_P (src))
22382 src = force_reg (src_mode, src);
22384 if (MEM_P (dest))
22386 rtx tmp = gen_reg_rtx (dest_mode);
22387 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22388 rs6000_emit_move (dest, tmp, dest_mode);
22389 return;
22392 /* Convert to IEEE 128-bit floating point. */
22393 if (FLOAT128_IEEE_P (dest_mode))
22395 if (dest_mode == KFmode)
22396 kf_or_tf = 0;
22397 else if (dest_mode == TFmode)
22398 kf_or_tf = 1;
22399 else
22400 gcc_unreachable ();
22402 switch (src_mode)
22404 case E_DFmode:
22405 cvt = sext_optab;
22406 hw_convert = hw_conversions[kf_or_tf].from_df;
22407 break;
22409 case E_SFmode:
22410 cvt = sext_optab;
22411 hw_convert = hw_conversions[kf_or_tf].from_sf;
22412 break;
22414 case E_KFmode:
22415 case E_IFmode:
22416 case E_TFmode:
22417 if (FLOAT128_IBM_P (src_mode))
22418 cvt = sext_optab;
22419 else
22420 do_move = true;
22421 break;
22423 case E_SImode:
22424 if (unsigned_p)
22426 cvt = ufloat_optab;
22427 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22429 else
22431 cvt = sfloat_optab;
22432 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22434 break;
22436 case E_DImode:
22437 if (unsigned_p)
22439 cvt = ufloat_optab;
22440 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22442 else
22444 cvt = sfloat_optab;
22445 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22447 break;
22449 default:
22450 gcc_unreachable ();
22454 /* Convert from IEEE 128-bit floating point. */
22455 else if (FLOAT128_IEEE_P (src_mode))
22457 if (src_mode == KFmode)
22458 kf_or_tf = 0;
22459 else if (src_mode == TFmode)
22460 kf_or_tf = 1;
22461 else
22462 gcc_unreachable ();
22464 switch (dest_mode)
22466 case E_DFmode:
22467 cvt = trunc_optab;
22468 hw_convert = hw_conversions[kf_or_tf].to_df;
22469 break;
22471 case E_SFmode:
22472 cvt = trunc_optab;
22473 hw_convert = hw_conversions[kf_or_tf].to_sf;
22474 break;
22476 case E_KFmode:
22477 case E_IFmode:
22478 case E_TFmode:
22479 if (FLOAT128_IBM_P (dest_mode))
22480 cvt = trunc_optab;
22481 else
22482 do_move = true;
22483 break;
22485 case E_SImode:
22486 if (unsigned_p)
22488 cvt = ufix_optab;
22489 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22491 else
22493 cvt = sfix_optab;
22494 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22496 break;
22498 case E_DImode:
22499 if (unsigned_p)
22501 cvt = ufix_optab;
22502 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22504 else
22506 cvt = sfix_optab;
22507 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22509 break;
22511 default:
22512 gcc_unreachable ();
22516 /* Both IBM format. */
22517 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22518 do_move = true;
22520 else
22521 gcc_unreachable ();
22523 /* Handle conversion between TFmode/KFmode. */
22524 if (do_move)
22525 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22527 /* Handle conversion if we have hardware support. */
22528 else if (TARGET_FLOAT128_HW && hw_convert)
22529 emit_insn ((hw_convert) (dest, src));
22531 /* Call an external function to do the conversion. */
22532 else if (cvt != unknown_optab)
22534 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22535 gcc_assert (libfunc != NULL_RTX);
22537 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22538 src, src_mode);
22540 gcc_assert (dest2 != NULL_RTX);
22541 if (!rtx_equal_p (dest, dest2))
22542 emit_move_insn (dest, dest2);
22545 else
22546 gcc_unreachable ();
22548 return;
22552 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22553 can be used as that dest register. Return the dest register. */
22556 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22558 if (op2 == const0_rtx)
22559 return op1;
22561 if (GET_CODE (scratch) == SCRATCH)
22562 scratch = gen_reg_rtx (mode);
22564 if (logical_operand (op2, mode))
22565 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22566 else
22567 emit_insn (gen_rtx_SET (scratch,
22568 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22570 return scratch;
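/* For illustration (hypothetical operands): to test r3 == 5, the
   constant 5 is a logical_operand, so SCRATCH receives r3 ^ 5, which is
   zero exactly when r3 == 5; a constant with bits in both halves, such
   as 0x12345, is not a logical_operand and would use the subtract form
   instead.  */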
22573 void
22574 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22576 rtx condition_rtx;
22577 machine_mode op_mode;
22578 enum rtx_code cond_code;
22579 rtx result = operands[0];
22581 condition_rtx = rs6000_generate_compare (operands[1], mode);
22582 cond_code = GET_CODE (condition_rtx);
22584 if (cond_code == NE
22585 || cond_code == GE || cond_code == LE
22586 || cond_code == GEU || cond_code == LEU
22587 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22589 rtx not_result = gen_reg_rtx (CCEQmode);
22590 rtx not_op, rev_cond_rtx;
22591 machine_mode cc_mode;
22593 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22595 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22596 SImode, XEXP (condition_rtx, 0), const0_rtx);
22597 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22598 emit_insn (gen_rtx_SET (not_result, not_op));
22599 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22602 op_mode = GET_MODE (XEXP (operands[1], 0));
22603 if (op_mode == VOIDmode)
22604 op_mode = GET_MODE (XEXP (operands[1], 1));
22606 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22608 PUT_MODE (condition_rtx, DImode);
22609 convert_move (result, condition_rtx, 0);
22611 else
22613 PUT_MODE (condition_rtx, SImode);
22614 emit_insn (gen_rtx_SET (result, condition_rtx));
22618 /* Emit a conditional branch to the label in operands[3], testing the comparison in operands[0]. */
22620 void
22621 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22623 rtx condition_rtx, loc_ref;
22625 condition_rtx = rs6000_generate_compare (operands[0], mode);
22626 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22627 emit_jump_insn (gen_rtx_SET (pc_rtx,
22628 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22629 loc_ref, pc_rtx)));
22632 /* Return the string to output a conditional branch to LABEL, which is
22633 the operand template of the label, or NULL if the branch is really a
22634 conditional return.
22636 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22637 condition code register and its mode specifies what kind of
22638 comparison we made.
22640 REVERSED is nonzero if we should reverse the sense of the comparison.
22642 INSN is the insn. */
22644 char *
22645 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22647 static char string[64];
22648 enum rtx_code code = GET_CODE (op);
22649 rtx cc_reg = XEXP (op, 0);
22650 machine_mode mode = GET_MODE (cc_reg);
22651 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22652 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22653 int really_reversed = reversed ^ need_longbranch;
22654 char *s = string;
22655 const char *ccode;
22656 const char *pred;
22657 rtx note;
22659 validate_condition_mode (code, mode);
22661 /* Work out which way this really branches. We could use
22662 reverse_condition_maybe_unordered here always but this
22663 makes the resulting assembler clearer. */
22664 if (really_reversed)
22666 /* Reversal of FP compares takes care -- an ordered compare
22667 becomes an unordered compare and vice versa. */
22668 if (mode == CCFPmode)
22669 code = reverse_condition_maybe_unordered (code);
22670 else
22671 code = reverse_condition (code);
22674 switch (code)
22676 /* Not all of these are actually distinct opcodes, but
22677 we distinguish them for clarity of the resulting assembler. */
22678 case NE: case LTGT:
22679 ccode = "ne"; break;
22680 case EQ: case UNEQ:
22681 ccode = "eq"; break;
22682 case GE: case GEU:
22683 ccode = "ge"; break;
22684 case GT: case GTU: case UNGT:
22685 ccode = "gt"; break;
22686 case LE: case LEU:
22687 ccode = "le"; break;
22688 case LT: case LTU: case UNLT:
22689 ccode = "lt"; break;
22690 case UNORDERED: ccode = "un"; break;
22691 case ORDERED: ccode = "nu"; break;
22692 case UNGE: ccode = "nl"; break;
22693 case UNLE: ccode = "ng"; break;
22694 default:
22695 gcc_unreachable ();
22698 /* Maybe we have a guess as to how likely the branch is. */
22699 pred = "";
22700 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22701 if (note != NULL_RTX)
22703 /* PROB is the difference from 50%. */
22704 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22705 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22707 /* Only hint for highly probable/improbable branches on newer cpus when
22708 we have real profile data, as static prediction overrides processor
22709 dynamic prediction. For older cpus we may as well always hint, but
22710 assume not taken for branches that are very close to 50% as a
22711 mispredicted taken branch is more expensive than a
22712 mispredicted not-taken branch. */
22713 if (rs6000_always_hint
22714 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22715 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22716 && br_prob_note_reliable_p (note)))
22718 if (abs (prob) > REG_BR_PROB_BASE / 20
22719 && ((prob > 0) ^ need_longbranch))
22720 pred = "+";
22721 else
22722 pred = "-";
22726 if (label == NULL)
22727 s += sprintf (s, "b%slr%s ", ccode, pred);
22728 else
22729 s += sprintf (s, "b%s%s ", ccode, pred);
22731 /* We need to escape any '%' characters in the reg_names string.
22732 Assume they'd only be the first character.... */
22733 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22734 *s++ = '%';
22735 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22737 if (label != NULL)
22739 /* If the branch distance was too far, we may have to use an
22740 unconditional branch to go the distance. */
22741 if (need_longbranch)
22742 s += sprintf (s, ",$+8\n\tb %s", label);
22743 else
22744 s += sprintf (s, ",%s", label);
22747 return string;
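/* Illustrative results (assuming CR0 and a label "L9"): a short
   predicted-taken branch might come out as "beq+ cr0,L9"; when the target
   is out of conditional-branch range, the sense is inverted and the code
   falls back to "bne cr0,$+8" followed by "b L9".  */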
22750 /* Return insn for VSX or Altivec comparisons. */
22752 static rtx
22753 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22755 rtx mask;
22756 machine_mode mode = GET_MODE (op0);
22758 switch (code)
22760 default:
22761 break;
22763 case GE:
22764 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22765 return NULL_RTX;
22766 /* FALLTHRU */
22768 case EQ:
22769 case GT:
22770 case GTU:
22771 case ORDERED:
22772 case UNORDERED:
22773 case UNEQ:
22774 case LTGT:
22775 mask = gen_reg_rtx (mode);
22776 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22777 return mask;
22780 return NULL_RTX;
22783 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22784 DMODE is expected destination mode. This is a recursive function. */
22786 static rtx
22787 rs6000_emit_vector_compare (enum rtx_code rcode,
22788 rtx op0, rtx op1,
22789 machine_mode dmode)
22791 rtx mask;
22792 bool swap_operands = false;
22793 bool try_again = false;
22795 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22796 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22798 /* See if the comparison works as is. */
22799 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22800 if (mask)
22801 return mask;
22803 switch (rcode)
22805 case LT:
22806 rcode = GT;
22807 swap_operands = true;
22808 try_again = true;
22809 break;
22810 case LTU:
22811 rcode = GTU;
22812 swap_operands = true;
22813 try_again = true;
22814 break;
22815 case NE:
22816 case UNLE:
22817 case UNLT:
22818 case UNGE:
22819 case UNGT:
22820 /* Invert condition and try again.
22821 e.g., A != B becomes ~(A==B). */
22823 enum rtx_code rev_code;
22824 enum insn_code nor_code;
22825 rtx mask2;
22827 rev_code = reverse_condition_maybe_unordered (rcode);
22828 if (rev_code == UNKNOWN)
22829 return NULL_RTX;
22831 nor_code = optab_handler (one_cmpl_optab, dmode);
22832 if (nor_code == CODE_FOR_nothing)
22833 return NULL_RTX;
22835 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22836 if (!mask2)
22837 return NULL_RTX;
22839 mask = gen_reg_rtx (dmode);
22840 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22841 return mask;
22843 break;
22844 case GE:
22845 case GEU:
22846 case LE:
22847 case LEU:
22848 /* Try GT/GTU/LT/LTU OR EQ */
22850 rtx c_rtx, eq_rtx;
22851 enum insn_code ior_code;
22852 enum rtx_code new_code;
22854 switch (rcode)
22856 case GE:
22857 new_code = GT;
22858 break;
22860 case GEU:
22861 new_code = GTU;
22862 break;
22864 case LE:
22865 new_code = LT;
22866 break;
22868 case LEU:
22869 new_code = LTU;
22870 break;
22872 default:
22873 gcc_unreachable ();
22876 ior_code = optab_handler (ior_optab, dmode);
22877 if (ior_code == CODE_FOR_nothing)
22878 return NULL_RTX;
22880 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22881 if (!c_rtx)
22882 return NULL_RTX;
22884 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22885 if (!eq_rtx)
22886 return NULL_RTX;
22888 mask = gen_reg_rtx (dmode);
22889 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22890 return mask;
22892 break;
22893 default:
22894 return NULL_RTX;
22897 if (try_again)
22899 if (swap_operands)
22900 std::swap (op0, op1);
22902 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22903 if (mask)
22904 return mask;
22907 /* You only get two chances. */
22908 return NULL_RTX;
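/* For illustration: vector NE is synthesized as the one's complement of
   the EQ mask, integer GE as (GT | EQ), and LT by swapping the operands
   of GT; if none of these rewrites yields a supported compare, the
   caller gets NULL_RTX and must fall back to another strategy.  */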
22911 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22912 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22913 operands for the relation operation COND. */
22916 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22917 rtx cond, rtx cc_op0, rtx cc_op1)
22919 machine_mode dest_mode = GET_MODE (dest);
22920 machine_mode mask_mode = GET_MODE (cc_op0);
22921 enum rtx_code rcode = GET_CODE (cond);
22922 machine_mode cc_mode = CCmode;
22923 rtx mask;
22924 rtx cond2;
22925 bool invert_move = false;
22927 if (VECTOR_UNIT_NONE_P (dest_mode))
22928 return 0;
22930 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22931 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22933 switch (rcode)
22935 /* Swap operands if we can, and fall back to doing the operation as
22936 specified, and doing a NOR to invert the test. */
22937 case NE:
22938 case UNLE:
22939 case UNLT:
22940 case UNGE:
22941 case UNGT:
22942 /* Invert condition and try again.
22943 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22944 invert_move = true;
22945 rcode = reverse_condition_maybe_unordered (rcode);
22946 if (rcode == UNKNOWN)
22947 return 0;
22948 break;
22950 case GE:
22951 case LE:
22952 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22954 /* Invert condition to avoid compound test. */
22955 invert_move = true;
22956 rcode = reverse_condition (rcode);
22958 break;
22960 case GTU:
22961 case GEU:
22962 case LTU:
22963 case LEU:
22964 /* Mark unsigned tests with CCUNSmode. */
22965 cc_mode = CCUNSmode;
22967 /* Invert condition to avoid compound test if necessary. */
22968 if (rcode == GEU || rcode == LEU)
22970 invert_move = true;
22971 rcode = reverse_condition (rcode);
22973 break;
22975 default:
22976 break;
22979 /* Get the vector mask for the given relational operations. */
22980 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22982 if (!mask)
22983 return 0;
22985 if (invert_move)
22986 std::swap (op_true, op_false);
22988 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22989 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22990 && (GET_CODE (op_true) == CONST_VECTOR
22991 || GET_CODE (op_false) == CONST_VECTOR))
22993 rtx constant_0 = CONST0_RTX (dest_mode);
22994 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22996 if (op_true == constant_m1 && op_false == constant_0)
22998 emit_move_insn (dest, mask);
22999 return 1;
23002 else if (op_true == constant_0 && op_false == constant_m1)
23004 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
23005 return 1;
23008 /* If we can't use the vector comparison directly, perhaps we can use
23009 the mask for the true or false fields, instead of loading up a
23010 constant. */
23011 if (op_true == constant_m1)
23012 op_true = mask;
23014 if (op_false == constant_0)
23015 op_false = mask;
23018 if (!REG_P (op_true) && !SUBREG_P (op_true))
23019 op_true = force_reg (dest_mode, op_true);
23021 if (!REG_P (op_false) && !SUBREG_P (op_false))
23022 op_false = force_reg (dest_mode, op_false);
23024 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
23025 CONST0_RTX (dest_mode));
23026 emit_insn (gen_rtx_SET (dest,
23027 gen_rtx_IF_THEN_ELSE (dest_mode,
23028 cond2,
23029 op_true,
23030 op_false)));
23031 return 1;
23034 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
23035 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of the
23036 last comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
23037 hardware has no such operation. */
23039 static int
23040 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23042 enum rtx_code code = GET_CODE (op);
23043 rtx op0 = XEXP (op, 0);
23044 rtx op1 = XEXP (op, 1);
23045 machine_mode compare_mode = GET_MODE (op0);
23046 machine_mode result_mode = GET_MODE (dest);
23047 bool max_p = false;
23049 if (result_mode != compare_mode)
23050 return 0;
23052 if (code == GE || code == GT)
23053 max_p = true;
23054 else if (code == LE || code == LT)
23055 max_p = false;
23056 else
23057 return 0;
23059 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
23062 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
23063 max_p = !max_p;
23065 else
23066 return 0;
23068 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
23069 return 1;
23072 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
23073 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
23074 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
23075 zero/false. Return 0 if the hardware has no such operation. */
23077 static int
23078 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23080 enum rtx_code code = GET_CODE (op);
23081 rtx op0 = XEXP (op, 0);
23082 rtx op1 = XEXP (op, 1);
23083 machine_mode result_mode = GET_MODE (dest);
23084 rtx compare_rtx;
23085 rtx cmove_rtx;
23086 rtx clobber_rtx;
23088 if (!can_create_pseudo_p ())
23089 return 0;
23091 switch (code)
23093 case EQ:
23094 case GE:
23095 case GT:
23096 break;
23098 case NE:
23099 case LT:
23100 case LE:
23101 code = swap_condition (code);
23102 std::swap (op0, op1);
23103 break;
23105 default:
23106 return 0;
23109 /* Generate: [(parallel [(set (dest)
23110 (if_then_else (op (cmp1) (cmp2))
23111 (true)
23112 (false)))
23113 (clobber (scratch))])]. */
23115 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23116 cmove_rtx = gen_rtx_SET (dest,
23117 gen_rtx_IF_THEN_ELSE (result_mode,
23118 compare_rtx,
23119 true_cond,
23120 false_cond));
23122 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23123 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23124 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23126 return 1;
23129 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
23130 operands of the last comparison is nonzero/true, FALSE_COND if it
23131 is zero/false. Return 0 if the hardware has no such operation. */
23134 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23136 enum rtx_code code = GET_CODE (op);
23137 rtx op0 = XEXP (op, 0);
23138 rtx op1 = XEXP (op, 1);
23139 machine_mode compare_mode = GET_MODE (op0);
23140 machine_mode result_mode = GET_MODE (dest);
23141 rtx temp;
23142 bool is_against_zero;
23144 /* These modes should always match. */
23145 if (GET_MODE (op1) != compare_mode
23146 /* In the isel case however, we can use a compare immediate, so
23147 op1 may be a small constant. */
23148 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23149 return 0;
23150 if (GET_MODE (true_cond) != result_mode)
23151 return 0;
23152 if (GET_MODE (false_cond) != result_mode)
23153 return 0;
23155 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23156 if (TARGET_P9_MINMAX
23157 && (compare_mode == SFmode || compare_mode == DFmode)
23158 && (result_mode == SFmode || result_mode == DFmode))
23160 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23161 return 1;
23163 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23164 return 1;
23167 /* Don't allow using floating point comparisons for integer results for
23168 now. */
23169 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23170 return 0;
23172 /* First, work out if the hardware can do this at all, or
23173 if it's too slow.... */
23174 if (!FLOAT_MODE_P (compare_mode))
23176 if (TARGET_ISEL)
23177 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23178 return 0;
23181 is_against_zero = op1 == CONST0_RTX (compare_mode);
23183 /* A floating-point subtract might overflow, underflow, or produce
23184 an inexact result, thus changing the floating-point flags, so it
23185 can't be generated if we care about that. It's safe if one side
23186 of the construct is zero, since then no subtract will be
23187 generated. */
23188 if (SCALAR_FLOAT_MODE_P (compare_mode)
23189 && flag_trapping_math && ! is_against_zero)
23190 return 0;
23192 /* Eliminate half of the comparisons by switching operands, this
23193 makes the remaining code simpler. */
23194 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23195 || code == LTGT || code == LT || code == UNLE)
23197 code = reverse_condition_maybe_unordered (code);
23198 temp = true_cond;
23199 true_cond = false_cond;
23200 false_cond = temp;
23203 /* UNEQ and LTGT take four instructions for a comparison with zero,
23204 so it'll probably be faster to use a branch here too. */
23205 if (code == UNEQ && HONOR_NANS (compare_mode))
23206 return 0;
23208 /* We're going to try to implement comparisons by performing
23209 a subtract, then comparing against zero. Unfortunately,
23210 Inf - Inf is NaN which is not zero, and so if we don't
23211 know that the operand is finite and the comparison
23212 would treat EQ different to UNORDERED, we can't do it. */
23213 if (HONOR_INFINITIES (compare_mode)
23214 && code != GT && code != UNGE
23215 && (GET_CODE (op1) != CONST_DOUBLE
23216 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23217 /* Constructs of the form (a OP b ? a : b) are safe. */
23218 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23219 || (! rtx_equal_p (op0, true_cond)
23220 && ! rtx_equal_p (op1, true_cond))))
23221 return 0;
23223 /* At this point we know we can use fsel. */
23225 /* Reduce the comparison to a comparison against zero. */
23226 if (! is_against_zero)
23228 temp = gen_reg_rtx (compare_mode);
23229 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23230 op0 = temp;
23231 op1 = CONST0_RTX (compare_mode);
23234 /* If we don't care about NaNs we can reduce some of the comparisons
23235 down to faster ones. */
23236 if (! HONOR_NANS (compare_mode))
23237 switch (code)
23239 case GT:
23240 code = LE;
23241 temp = true_cond;
23242 true_cond = false_cond;
23243 false_cond = temp;
23244 break;
23245 case UNGE:
23246 code = GE;
23247 break;
23248 case UNEQ:
23249 code = EQ;
23250 break;
23251 default:
23252 break;
23255 /* Now, reduce everything down to a GE. */
23256 switch (code)
23258 case GE:
23259 break;
23261 case LE:
23262 temp = gen_reg_rtx (compare_mode);
23263 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23264 op0 = temp;
23265 break;
23267 case ORDERED:
23268 temp = gen_reg_rtx (compare_mode);
23269 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23270 op0 = temp;
23271 break;
23273 case EQ:
23274 temp = gen_reg_rtx (compare_mode);
23275 emit_insn (gen_rtx_SET (temp,
23276 gen_rtx_NEG (compare_mode,
23277 gen_rtx_ABS (compare_mode, op0))));
23278 op0 = temp;
23279 break;
23281 case UNGE:
23282 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23283 temp = gen_reg_rtx (result_mode);
23284 emit_insn (gen_rtx_SET (temp,
23285 gen_rtx_IF_THEN_ELSE (result_mode,
23286 gen_rtx_GE (VOIDmode,
23287 op0, op1),
23288 true_cond, false_cond)));
23289 false_cond = true_cond;
23290 true_cond = temp;
23292 temp = gen_reg_rtx (compare_mode);
23293 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23294 op0 = temp;
23295 break;
23297 case GT:
23298 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23299 temp = gen_reg_rtx (result_mode);
23300 emit_insn (gen_rtx_SET (temp,
23301 gen_rtx_IF_THEN_ELSE (result_mode,
23302 gen_rtx_GE (VOIDmode,
23303 op0, op1),
23304 true_cond, false_cond)));
23305 true_cond = false_cond;
23306 false_cond = temp;
23308 temp = gen_reg_rtx (compare_mode);
23309 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23310 op0 = temp;
23311 break;
23313 default:
23314 gcc_unreachable ();
23317 emit_insn (gen_rtx_SET (dest,
23318 gen_rtx_IF_THEN_ELSE (result_mode,
23319 gen_rtx_GE (VOIDmode,
23320 op0, op1),
23321 true_cond, false_cond)));
23322 return 1;
23325 /* Same as above, but for ints (isel). */
23328 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23330 rtx condition_rtx, cr;
23331 machine_mode mode = GET_MODE (dest);
23332 enum rtx_code cond_code;
23333 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23334 bool signedp;
23336 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23337 return 0;
23339 /* We still have to do the compare, because isel doesn't do a
23340 compare, it just looks at the CRx bits set by a previous compare
23341 instruction. */
23342 condition_rtx = rs6000_generate_compare (op, mode);
23343 cond_code = GET_CODE (condition_rtx);
23344 cr = XEXP (condition_rtx, 0);
23345 signedp = GET_MODE (cr) == CCmode;
23347 isel_func = (mode == SImode
23348 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23349 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23351 switch (cond_code)
23353 case LT: case GT: case LTU: case GTU: case EQ:
23354 /* isel handles these directly. */
23355 break;
23357 default:
23358 /* We need to swap the sense of the comparison. */
23360 std::swap (false_cond, true_cond);
23361 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23363 break;
23366 false_cond = force_reg (mode, false_cond);
23367 if (true_cond != const0_rtx)
23368 true_cond = force_reg (mode, true_cond);
23370 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23372 return 1;
23375 void
23376 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23378 machine_mode mode = GET_MODE (op0);
23379 enum rtx_code c;
23380 rtx target;
23382 /* VSX/altivec have direct min/max insns. */
23383 if ((code == SMAX || code == SMIN)
23384 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23385 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23387 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23388 return;
23391 if (code == SMAX || code == SMIN)
23392 c = GE;
23393 else
23394 c = GEU;
23396 if (code == SMAX || code == UMAX)
23397 target = emit_conditional_move (dest, c, op0, op1, mode,
23398 op0, op1, mode, 0);
23399 else
23400 target = emit_conditional_move (dest, c, op0, op1, mode,
23401 op1, op0, mode, 0);
23402 gcc_assert (target);
23403 if (target != dest)
23404 emit_move_insn (dest, target);
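/* For illustration: without a direct min/max pattern, SMAX (a, b) is
   formed as the conditional move "a >= b ? a : b" and SMIN (a, b) as
   "a >= b ? b : a", both using the GE (signed) or GEU (unsigned)
   condition chosen above.  */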
23407 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23408 for the value to come from memory or if it is already loaded into a GPR. */
23410 void
23411 rs6000_split_signbit (rtx dest, rtx src)
23413 machine_mode d_mode = GET_MODE (dest);
23414 machine_mode s_mode = GET_MODE (src);
23415 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23416 rtx shift_reg = dest_di;
23418 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23420 if (MEM_P (src))
23422 rtx mem = (WORDS_BIG_ENDIAN
23423 ? adjust_address (src, DImode, 0)
23424 : adjust_address (src, DImode, 8));
23425 emit_insn (gen_rtx_SET (dest_di, mem));
23428 else
23430 unsigned int r = reg_or_subregno (src);
23432 if (INT_REGNO_P (r))
23433 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23435 else
23437 /* Generate the special mfvsrd instruction to get it in a GPR. */
23438 gcc_assert (VSX_REGNO_P (r));
23439 if (s_mode == KFmode)
23440 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23441 else
23442 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23446 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23447 return;
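/* Editorial sketch (illustrative, not part of the original source): on a
   64-bit little-endian target with the value in memory, the sign of an
   IEEE 128-bit float lives in the doubleword at offset 8, so the
   expansion above amounts to

       ld    rD,8(addr)       # high doubleword holds the sign
       srdi  rD,rD,63         # isolate the sign bit

   (offset 0, or the first register of the pair, on big-endian).  */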
23450 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23451 COND is true. Mark the jump as unlikely to be taken. */
23453 static void
23454 emit_unlikely_jump (rtx cond, rtx label)
23456 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23457 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23458 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23461 /* A subroutine of the atomic operation splitters. Emit a load-locked
23462 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23463 the zero_extend operation. */
23465 static void
23466 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23468 rtx (*fn) (rtx, rtx) = NULL;
23470 switch (mode)
23472 case E_QImode:
23473 fn = gen_load_lockedqi;
23474 break;
23475 case E_HImode:
23476 fn = gen_load_lockedhi;
23477 break;
23478 case E_SImode:
23479 if (GET_MODE (mem) == QImode)
23480 fn = gen_load_lockedqi_si;
23481 else if (GET_MODE (mem) == HImode)
23482 fn = gen_load_lockedhi_si;
23483 else
23484 fn = gen_load_lockedsi;
23485 break;
23486 case E_DImode:
23487 fn = gen_load_lockeddi;
23488 break;
23489 case E_TImode:
23490 fn = gen_load_lockedti;
23491 break;
23492 default:
23493 gcc_unreachable ();
23495 emit_insn (fn (reg, mem));
23498 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23499 instruction in MODE. */
23501 static void
23502 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23504 rtx (*fn) (rtx, rtx, rtx) = NULL;
23506 switch (mode)
23508 case E_QImode:
23509 fn = gen_store_conditionalqi;
23510 break;
23511 case E_HImode:
23512 fn = gen_store_conditionalhi;
23513 break;
23514 case E_SImode:
23515 fn = gen_store_conditionalsi;
23516 break;
23517 case E_DImode:
23518 fn = gen_store_conditionaldi;
23519 break;
23520 case E_TImode:
23521 fn = gen_store_conditionalti;
23522 break;
23523 default:
23524 gcc_unreachable ();
23527 /* Emit sync before stwcx. to address PPC405 erratum 77. */
23528 if (PPC405_ERRATUM77)
23529 emit_insn (gen_hwsync ());
23531 emit_insn (fn (res, mem, val));
23534 /* Expand barriers before and after a load_locked/store_cond sequence. */
23536 static rtx
23537 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23539 rtx addr = XEXP (mem, 0);
23541 if (!legitimate_indirect_address_p (addr, reload_completed)
23542 && !legitimate_indexed_address_p (addr, reload_completed))
23544 addr = force_reg (Pmode, addr);
23545 mem = replace_equiv_address_nv (mem, addr);
23548 switch (model)
23550 case MEMMODEL_RELAXED:
23551 case MEMMODEL_CONSUME:
23552 case MEMMODEL_ACQUIRE:
23553 break;
23554 case MEMMODEL_RELEASE:
23555 case MEMMODEL_ACQ_REL:
23556 emit_insn (gen_lwsync ());
23557 break;
23558 case MEMMODEL_SEQ_CST:
23559 emit_insn (gen_hwsync ());
23560 break;
23561 default:
23562 gcc_unreachable ();
23564 return mem;
23567 static void
23568 rs6000_post_atomic_barrier (enum memmodel model)
23570 switch (model)
23572 case MEMMODEL_RELAXED:
23573 case MEMMODEL_CONSUME:
23574 case MEMMODEL_RELEASE:
23575 break;
23576 case MEMMODEL_ACQUIRE:
23577 case MEMMODEL_ACQ_REL:
23578 case MEMMODEL_SEQ_CST:
23579 emit_insn (gen_isync ());
23580 break;
23581 default:
23582 gcc_unreachable ();
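/* Editorial summary (derived from the two switches above, not part of
   the original source): the barriers bracketing a larx/stcx. sequence
   end up as

       model      before        after
       relaxed    (none)        (none)
       consume    (none)        (none)
       acquire    (none)        isync
       release    lwsync        (none)
       acq_rel    lwsync        isync
       seq_cst    hwsync        isync                                   */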
23586 /* A subroutine of the various atomic expanders. For sub-word operations,
23587 we must adjust things to operate on SImode. Given the original MEM,
23588 return a new aligned memory. Also build and return the quantities by
23589 which to shift and mask. */
23591 static rtx
23592 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23594 rtx addr, align, shift, mask, mem;
23595 HOST_WIDE_INT shift_mask;
23596 machine_mode mode = GET_MODE (orig_mem);
23598 /* For smaller modes, we have to implement this via SImode. */
23599 shift_mask = (mode == QImode ? 0x18 : 0x10);
23601 addr = XEXP (orig_mem, 0);
23602 addr = force_reg (GET_MODE (addr), addr);
23604 /* Aligned memory containing the subword. Generate a new MEM. We
23605 do not want any of the existing MEM_ATTR data, as we're now
23606 accessing memory outside the original object. */
23607 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23608 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23609 mem = gen_rtx_MEM (SImode, align);
23610 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23611 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23612 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23614 /* Shift amount for subword relative to aligned word. */
23615 shift = gen_reg_rtx (SImode);
23616 addr = gen_lowpart (SImode, addr);
23617 rtx tmp = gen_reg_rtx (SImode);
23618 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23619 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23620 if (BYTES_BIG_ENDIAN)
23621 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23622 shift, 1, OPTAB_LIB_WIDEN);
23623 *pshift = shift;
23625 /* Mask for insertion. */
23626 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23627 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23628 *pmask = mask;
23630 return mem;
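/* Worked example (editorial, not part of the original source): a HImode
   atomic at address 0x1002 on a little-endian target yields

       align = 0x1002 & -4           = 0x1000
       shift = (0x1002 << 3) & 0x10  = 0x10      # halfword = bits 16..31
       mask  = 0xffff << 16          = 0xffff0000

   On big-endian the shift is XORed with 0x10, giving 0, since byte
   offset 2 of a big-endian word holds the least significant halfword.  */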
23633 /* A subroutine of the various atomic expanders. For sub-word operands,
23634 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23636 static rtx
23637 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23639 rtx x;
23641 x = gen_reg_rtx (SImode);
23642 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23643 gen_rtx_NOT (SImode, mask),
23644 oldval)));
23646 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23648 return x;
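/* Editorial note (not part of the original source): the net effect is
   x = newval | (oldval & ~mask), i.e. NEWVAL (already shifted into
   place by the callers) supplies the subword bits and OLDVAL supplies
   the rest of the word.  */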
23651 /* A subroutine of the various atomic expanders. For sub-word operands,
23652 extract WIDE to NARROW via SHIFT. */
23654 static void
23655 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23657 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23658 wide, 1, OPTAB_LIB_WIDEN);
23659 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23662 /* Expand an atomic compare and swap operation. */
23664 void
23665 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23667 rtx boolval, retval, mem, oldval, newval, cond;
23668 rtx label1, label2, x, mask, shift;
23669 machine_mode mode, orig_mode;
23670 enum memmodel mod_s, mod_f;
23671 bool is_weak;
23673 boolval = operands[0];
23674 retval = operands[1];
23675 mem = operands[2];
23676 oldval = operands[3];
23677 newval = operands[4];
23678 is_weak = (INTVAL (operands[5]) != 0);
23679 mod_s = memmodel_base (INTVAL (operands[6]));
23680 mod_f = memmodel_base (INTVAL (operands[7]));
23681 orig_mode = mode = GET_MODE (mem);
23683 mask = shift = NULL_RTX;
23684 if (mode == QImode || mode == HImode)
23686 /* Before power8, we didn't have access to lbarx/lharx, so we generate a
23687 lwarx and use shift/mask operations. With power8, we need to do the
23688 comparison in SImode, but the store is still done in QI/HImode. */
23689 oldval = convert_modes (SImode, mode, oldval, 1);
23691 if (!TARGET_SYNC_HI_QI)
23693 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23695 /* Shift and mask OLDVAL into position within the word. */
23696 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23697 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23699 /* Shift and mask NEWVAL into position within the word. */
23700 newval = convert_modes (SImode, mode, newval, 1);
23701 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23702 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23705 /* Prepare to adjust the return value. */
23706 retval = gen_reg_rtx (SImode);
23707 mode = SImode;
23709 else if (reg_overlap_mentioned_p (retval, oldval))
23710 oldval = copy_to_reg (oldval);
23712 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23713 oldval = copy_to_mode_reg (mode, oldval);
23715 if (reg_overlap_mentioned_p (retval, newval))
23716 newval = copy_to_reg (newval);
23718 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23720 label1 = NULL_RTX;
23721 if (!is_weak)
23723 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23724 emit_label (XEXP (label1, 0));
23726 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23728 emit_load_locked (mode, retval, mem);
23730 x = retval;
23731 if (mask)
23732 x = expand_simple_binop (SImode, AND, retval, mask,
23733 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23735 cond = gen_reg_rtx (CCmode);
23736 /* If we have TImode, synthesize a comparison. */
23737 if (mode != TImode)
23738 x = gen_rtx_COMPARE (CCmode, x, oldval);
23739 else
23741 rtx xor1_result = gen_reg_rtx (DImode);
23742 rtx xor2_result = gen_reg_rtx (DImode);
23743 rtx or_result = gen_reg_rtx (DImode);
23744 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23745 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23746 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23747 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23749 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23750 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23751 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23752 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23755 emit_insn (gen_rtx_SET (cond, x));
23757 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23758 emit_unlikely_jump (x, label2);
23760 x = newval;
23761 if (mask)
23762 x = rs6000_mask_atomic_subword (retval, newval, mask);
23764 emit_store_conditional (orig_mode, cond, mem, x);
23766 if (!is_weak)
23768 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23769 emit_unlikely_jump (x, label1);
23772 if (!is_mm_relaxed (mod_f))
23773 emit_label (XEXP (label2, 0));
23775 rs6000_post_atomic_barrier (mod_s);
23777 if (is_mm_relaxed (mod_f))
23778 emit_label (XEXP (label2, 0));
23780 if (shift)
23781 rs6000_finish_atomic_subword (operands[1], retval, shift);
23782 else if (mode != GET_MODE (operands[1]))
23783 convert_move (operands[1], retval, 1);
23785 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23786 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23787 emit_insn (gen_rtx_SET (boolval, x));
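/* Editorial sketch (illustrative, not part of the original source): a
   strong SImode compare-and-swap with seq_cst semantics corresponds to

        hwsync
   1:   lwarx   r,0,mem         # load-locked
        cmpw    cr0,r,oldval
        bne-    cr0,2f          # mismatch: fail
        stwcx.  newval,0,mem    # store-conditional
        bne-    cr0,1b          # lost reservation: retry
   2:   isync

   CR0 holds EQ on success and NE on failure.  A weak CAS omits the
   retry loop; sub-word cases either use lbarx/lharx (power8 and later)
   or run the loop on the containing word under a mask.  */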
23790 /* Expand an atomic exchange operation. */
23792 void
23793 rs6000_expand_atomic_exchange (rtx operands[])
23795 rtx retval, mem, val, cond;
23796 machine_mode mode;
23797 enum memmodel model;
23798 rtx label, x, mask, shift;
23800 retval = operands[0];
23801 mem = operands[1];
23802 val = operands[2];
23803 model = memmodel_base (INTVAL (operands[3]));
23804 mode = GET_MODE (mem);
23806 mask = shift = NULL_RTX;
23807 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23809 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23811 /* Shift and mask VAL into position within the word. */
23812 val = convert_modes (SImode, mode, val, 1);
23813 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23814 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23816 /* Prepare to adjust the return value. */
23817 retval = gen_reg_rtx (SImode);
23818 mode = SImode;
23821 mem = rs6000_pre_atomic_barrier (mem, model);
23823 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23824 emit_label (XEXP (label, 0));
23826 emit_load_locked (mode, retval, mem);
23828 x = val;
23829 if (mask)
23830 x = rs6000_mask_atomic_subword (retval, val, mask);
23832 cond = gen_reg_rtx (CCmode);
23833 emit_store_conditional (mode, cond, mem, x);
23835 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23836 emit_unlikely_jump (x, label);
23838 rs6000_post_atomic_barrier (model);
23840 if (shift)
23841 rs6000_finish_atomic_subword (operands[0], retval, shift);
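/* Editorial sketch (illustrative, not part of the original source): the
   SImode exchange loop is the same reservation loop with an
   unconditional replacement:

   1:   lwarx   ret,0,mem
        stwcx.  val,0,mem
        bne-    1b                                                       */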
23844 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23845 to perform. MEM is the memory on which to operate. VAL is the second
23846 operand of the binary operator. BEFORE and AFTER are optional locations to
23847 return the value of MEM either before or after the operation. MODEL_RTX
23848 is a CONST_INT containing the memory model to use. */
23850 void
23851 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23852 rtx orig_before, rtx orig_after, rtx model_rtx)
23854 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23855 machine_mode mode = GET_MODE (mem);
23856 machine_mode store_mode = mode;
23857 rtx label, x, cond, mask, shift;
23858 rtx before = orig_before, after = orig_after;
23860 mask = shift = NULL_RTX;
23861 /* On power8, we want to use SImode for the operation. On previous systems,
23862 operate on the word containing the subword and shift/mask to get the
23863 proper byte or halfword. */
23864 if (mode == QImode || mode == HImode)
23866 if (TARGET_SYNC_HI_QI)
23868 val = convert_modes (SImode, mode, val, 1);
23870 /* Prepare to adjust the return value. */
23871 before = gen_reg_rtx (SImode);
23872 if (after)
23873 after = gen_reg_rtx (SImode);
23874 mode = SImode;
23876 else
23878 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23880 /* Shift and mask VAL into position within the word. */
23881 val = convert_modes (SImode, mode, val, 1);
23882 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23883 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23885 switch (code)
23887 case IOR:
23888 case XOR:
23889 /* We've already zero-extended VAL. That is sufficient to
23890 make certain that it does not affect other bits. */
23891 mask = NULL;
23892 break;
23894 case AND:
23895 /* If we make certain that all of the other bits in VAL are
23896 set, that will be sufficient to not affect other bits. */
23897 x = gen_rtx_NOT (SImode, mask);
23898 x = gen_rtx_IOR (SImode, x, val);
23899 emit_insn (gen_rtx_SET (val, x));
23900 mask = NULL;
23901 break;
23903 case NOT:
23904 case PLUS:
23905 case MINUS:
23906 /* These will all affect bits outside the field and need
23907 adjustment via MASK within the loop. */
23908 break;
23910 default:
23911 gcc_unreachable ();
23914 /* Prepare to adjust the return value. */
23915 before = gen_reg_rtx (SImode);
23916 if (after)
23917 after = gen_reg_rtx (SImode);
23918 store_mode = mode = SImode;
23922 mem = rs6000_pre_atomic_barrier (mem, model);
23924 label = gen_label_rtx ();
23925 emit_label (label);
23926 label = gen_rtx_LABEL_REF (VOIDmode, label);
23928 if (before == NULL_RTX)
23929 before = gen_reg_rtx (mode);
23931 emit_load_locked (mode, before, mem);
23933 if (code == NOT)
23935 x = expand_simple_binop (mode, AND, before, val,
23936 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23937 after = expand_simple_unop (mode, NOT, x, after, 1);
23939 else
23941 after = expand_simple_binop (mode, code, before, val,
23942 after, 1, OPTAB_LIB_WIDEN);
23945 x = after;
23946 if (mask)
23948 x = expand_simple_binop (SImode, AND, after, mask,
23949 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23950 x = rs6000_mask_atomic_subword (before, x, mask);
23952 else if (store_mode != mode)
23953 x = convert_modes (store_mode, mode, x, 1);
23955 cond = gen_reg_rtx (CCmode);
23956 emit_store_conditional (store_mode, cond, mem, x);
23958 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23959 emit_unlikely_jump (x, label);
23961 rs6000_post_atomic_barrier (model);
23963 if (shift)
23965 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23966 then do the calculations in a SImode register. */
23967 if (orig_before)
23968 rs6000_finish_atomic_subword (orig_before, before, shift);
23969 if (orig_after)
23970 rs6000_finish_atomic_subword (orig_after, after, shift);
23972 else if (store_mode != mode)
23974 /* QImode/HImode on machines with lbarx/lharx where we do the native
23975 operation and then do the calculations in a SImode register. */
23976 if (orig_before)
23977 convert_move (orig_before, before, 1);
23978 if (orig_after)
23979 convert_move (orig_after, after, 1);
23981 else if (orig_after && after != orig_after)
23982 emit_move_insn (orig_after, after);
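/* Editorial sketch (illustrative, not part of the original source): an
   SImode fetch-and-add expands to

   1:   lwarx   before,0,mem
        add     after,before,val
        stwcx.  after,0,mem
        bne-    1b

   NOT is handled as AND followed by a complement (i.e. a NAND), and the
   sub-word cases are masked as described above.  */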
23985 /* Emit instructions to move SRC to DST. Called by splitters for
23986 multi-register moves. It will emit at most one instruction for
23987 each register that is accessed; that is, it won't emit li/lis pairs
23988 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23989 register. */
23991 void
23992 rs6000_split_multireg_move (rtx dst, rtx src)
23994 /* The register number of the first register being moved. */
23995 int reg;
23996 /* The mode that is to be moved. */
23997 machine_mode mode;
23998 /* The mode that the move is being done in, and its size. */
23999 machine_mode reg_mode;
24000 int reg_mode_size;
24001 /* The number of registers that will be moved. */
24002 int nregs;
24004 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
24005 mode = GET_MODE (dst);
24006 nregs = hard_regno_nregs (reg, mode);
24007 if (FP_REGNO_P (reg))
24008 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
24009 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
24010 else if (ALTIVEC_REGNO_P (reg))
24011 reg_mode = V16QImode;
24012 else
24013 reg_mode = word_mode;
24014 reg_mode_size = GET_MODE_SIZE (reg_mode);
24016 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
24018 /* TDmode residing in FP registers is special, since the ISA requires that
24019 the lower-numbered word of a register pair is always the most significant
24020 word, even in little-endian mode. This does not match the usual subreg
24021 semantics, so we cannot use simplify_gen_subreg in those cases. Access
24022 the appropriate constituent registers "by hand" in little-endian mode.
24024 Note we do not need to check for destructive overlap here since TDmode
24025 can only reside in even/odd register pairs. */
24026 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
24028 rtx p_src, p_dst;
24029 int i;
24031 for (i = 0; i < nregs; i++)
24033 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
24034 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
24035 else
24036 p_src = simplify_gen_subreg (reg_mode, src, mode,
24037 i * reg_mode_size);
24039 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
24040 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
24041 else
24042 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
24043 i * reg_mode_size);
24045 emit_insn (gen_rtx_SET (p_dst, p_src));
24048 return;
24051 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
24053 /* Move register range backwards, if we might have destructive
24054 overlap. */
24055 int i;
24056 for (i = nregs - 1; i >= 0; i--)
24057 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24058 i * reg_mode_size),
24059 simplify_gen_subreg (reg_mode, src, mode,
24060 i * reg_mode_size)));
24062 else
24064 int i;
24065 int j = -1;
24066 bool used_update = false;
24067 rtx restore_basereg = NULL_RTX;
24069 if (MEM_P (src) && INT_REGNO_P (reg))
24071 rtx breg;
24073 if (GET_CODE (XEXP (src, 0)) == PRE_INC
24074 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
24076 rtx delta_rtx;
24077 breg = XEXP (XEXP (src, 0), 0);
24078 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
24079 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
24080 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
24081 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24082 src = replace_equiv_address (src, breg);
24084 else if (! rs6000_offsettable_memref_p (src, reg_mode))
24086 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
24088 rtx basereg = XEXP (XEXP (src, 0), 0);
24089 if (TARGET_UPDATE)
24091 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
24092 emit_insn (gen_rtx_SET (ndst,
24093 gen_rtx_MEM (reg_mode,
24094 XEXP (src, 0))));
24095 used_update = true;
24097 else
24098 emit_insn (gen_rtx_SET (basereg,
24099 XEXP (XEXP (src, 0), 1)));
24100 src = replace_equiv_address (src, basereg);
24102 else
24104 rtx basereg = gen_rtx_REG (Pmode, reg);
24105 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
24106 src = replace_equiv_address (src, basereg);
24110 breg = XEXP (src, 0);
24111 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
24112 breg = XEXP (breg, 0);
24114 /* If the base register we are using to address memory is
24115 also a destination reg, then change that register last. */
24116 if (REG_P (breg)
24117 && REGNO (breg) >= REGNO (dst)
24118 && REGNO (breg) < REGNO (dst) + nregs)
24119 j = REGNO (breg) - REGNO (dst);
24121 else if (MEM_P (dst) && INT_REGNO_P (reg))
24123 rtx breg;
24125 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24126 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24128 rtx delta_rtx;
24129 breg = XEXP (XEXP (dst, 0), 0);
24130 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24131 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24132 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24134 /* We have to update the breg before doing the store.
24135 Use store with update, if available. */
24137 if (TARGET_UPDATE)
24139 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24140 emit_insn (TARGET_32BIT
24141 ? (TARGET_POWERPC64
24142 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24143 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24144 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24145 used_update = true;
24147 else
24148 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24149 dst = replace_equiv_address (dst, breg);
24151 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
24152 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24154 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24156 rtx basereg = XEXP (XEXP (dst, 0), 0);
24157 if (TARGET_UPDATE)
24159 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24160 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24161 XEXP (dst, 0)),
24162 nsrc));
24163 used_update = true;
24165 else
24166 emit_insn (gen_rtx_SET (basereg,
24167 XEXP (XEXP (dst, 0), 1)));
24168 dst = replace_equiv_address (dst, basereg);
24170 else
24172 rtx basereg = XEXP (XEXP (dst, 0), 0);
24173 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24174 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24175 && REG_P (basereg)
24176 && REG_P (offsetreg)
24177 && REGNO (basereg) != REGNO (offsetreg));
24178 if (REGNO (basereg) == 0)
24180 rtx tmp = offsetreg;
24181 offsetreg = basereg;
24182 basereg = tmp;
24184 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24185 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24186 dst = replace_equiv_address (dst, basereg);
24189 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24190 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24193 for (i = 0; i < nregs; i++)
24195 /* Calculate index to next subword. */
24196 ++j;
24197 if (j == nregs)
24198 j = 0;
24200 /* If the compiler has already emitted the move of the first word by
24201 a store with update, there is no need to do anything. */
24202 if (j == 0 && used_update)
24203 continue;
24205 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24206 j * reg_mode_size),
24207 simplify_gen_subreg (reg_mode, src, mode,
24208 j * reg_mode_size)));
24210 if (restore_basereg != NULL_RTX)
24211 emit_insn (restore_basereg);
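/* Editorial example (illustrative, not part of the original source): on
   32-bit, splitting the DImode move (r4:r5) = (r3:r4) in forward order
   would write r4 -- the low word of the source -- before reading it, so
   the overlap test above (REGNO (src) < REGNO (dst)) makes the split run
   backwards: r5 = r4 first, then r4 = r3.  */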
24216 /* This page contains routines that are used to determine what the
24217 function prologue and epilogue code will do and write them out. */
24219 /* Determine whether the REG is really used. */
24221 static bool
24222 save_reg_p (int reg)
24224 /* We need to mark the PIC offset register live under the same conditions
24225 in which it is set up; otherwise it won't be saved before we clobber it. */
24227 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24229 /* When calling eh_return, we must return true for all the cases
24230 where conditional_register_usage marks the PIC offset reg
24231 call used. */
24232 if (TARGET_TOC && TARGET_MINIMAL_TOC
24233 && (crtl->calls_eh_return
24234 || df_regs_ever_live_p (reg)
24235 || !constant_pool_empty_p ()))
24236 return true;
24238 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24239 && flag_pic)
24240 return true;
24243 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24246 /* Return the first fixed-point register that is required to be
24247 saved. 32 if none. */
24249 int
24250 first_reg_to_save (void)
24252 int first_reg;
24254 /* Find lowest numbered live register. */
24255 for (first_reg = 13; first_reg <= 31; first_reg++)
24256 if (save_reg_p (first_reg))
24257 break;
24259 #if TARGET_MACHO
24260 if (flag_pic
24261 && crtl->uses_pic_offset_table
24262 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24263 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24264 #endif
24266 return first_reg;
24269 /* Similar, for FP regs. */
24271 int
24272 first_fp_reg_to_save (void)
24274 int first_reg;
24276 /* Find lowest numbered live register. */
24277 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24278 if (save_reg_p (first_reg))
24279 break;
24281 return first_reg;
24284 /* Similar, for AltiVec regs. */
24286 static int
24287 first_altivec_reg_to_save (void)
24289 int i;
24291 /* Stack frame remains as is unless we are in the AltiVec ABI. */
24292 if (! TARGET_ALTIVEC_ABI)
24293 return LAST_ALTIVEC_REGNO + 1;
24295 /* On Darwin, the unwind routines are compiled without
24296 TARGET_ALTIVEC, and use save_world to save/restore the
24297 altivec registers when necessary. */
24298 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24299 && ! TARGET_ALTIVEC)
24300 return FIRST_ALTIVEC_REGNO + 20;
24302 /* Find lowest numbered live register. */
24303 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24304 if (save_reg_p (i))
24305 break;
24307 return i;
24310 /* Return a 32-bit mask of the AltiVec registers we need to set in
24311 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
24312 numbered from the MSB of the 32-bit word, so V0 maps to the MSB. */
24314 static unsigned int
24315 compute_vrsave_mask (void)
24317 unsigned int i, mask = 0;
24319 /* On Darwin, the unwind routines are compiled without
24320 TARGET_ALTIVEC, and use save_world to save/restore the
24321 call-saved altivec registers when necessary. */
24322 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24323 && ! TARGET_ALTIVEC)
24324 mask |= 0xFFF;
24326 /* First, find out if we use _any_ altivec registers. */
24327 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24328 if (df_regs_ever_live_p (i))
24329 mask |= ALTIVEC_REG_BIT (i);
24331 if (mask == 0)
24332 return mask;
24334 /* Next, remove the argument registers from the set. These must
24335 be in the VRSAVE mask set by the caller, so we don't need to add
24336 them in again. More importantly, the mask we compute here is
24337 used to generate CLOBBERs in the set_vrsave insn, and we do not
24338 wish the argument registers to die. */
24339 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24340 mask &= ~ALTIVEC_REG_BIT (i);
24342 /* Similarly, remove the return value from the set. */
24344 bool yes = false;
24345 diddle_return_value (is_altivec_return_reg, &yes);
24346 if (yes)
24347 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24350 return mask;
24353 /* For a very restricted set of circumstances, we can cut down the
24354 size of prologues/epilogues by calling our own save/restore-the-world
24355 routines. */
24357 static void
24358 compute_save_world_info (rs6000_stack_t *info)
24360 info->world_save_p = 1;
24361 info->world_save_p
24362 = (WORLD_SAVE_P (info)
24363 && DEFAULT_ABI == ABI_DARWIN
24364 && !cfun->has_nonlocal_label
24365 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24366 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24367 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24368 && info->cr_save_p);
24370 /* This will not work in conjunction with sibcalls. Make sure there
24371 are none. (This check is expensive, but seldom executed.) */
24372 if (WORLD_SAVE_P (info))
24374 rtx_insn *insn;
24375 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24376 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24378 info->world_save_p = 0;
24379 break;
24383 if (WORLD_SAVE_P (info))
24385 /* Even if we're not touching VRsave, make sure there's room on the
24386 stack for it, if it looks like we're calling SAVE_WORLD, which
24387 will attempt to save it. */
24388 info->vrsave_size = 4;
24390 /* If we are going to save the world, we need to save the link register too. */
24391 info->lr_save_p = 1;
24393 /* "Save" the VRsave register too if we're saving the world. */
24394 if (info->vrsave_mask == 0)
24395 info->vrsave_mask = compute_vrsave_mask ();
24397 /* Because the Darwin register save/restore routines only handle
24398 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24399 check. */
24400 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24401 && (info->first_altivec_reg_save
24402 >= FIRST_SAVED_ALTIVEC_REGNO));
24405 return;
24409 static void
24410 is_altivec_return_reg (rtx reg, void *xyes)
24412 bool *yes = (bool *) xyes;
24413 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24414 *yes = true;
24418 /* Return whether REG is a global user reg or has been specified by
24419 -ffixed-REG. We should not restore these, and so cannot use
24420 lmw or out-of-line restore functions if there are any. We also
24421 can't save them (well, emit frame notes for them), because frame
24422 unwinding during exception handling will restore saved registers. */
24424 static bool
24425 fixed_reg_p (int reg)
24427 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24428 backend sets it, overriding anything the user might have given. */
24429 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24430 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24431 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24432 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24433 return false;
24435 return fixed_regs[reg];
24438 /* Determine the strategy for savings/restoring registers. */
24440 enum {
24441 SAVE_MULTIPLE = 0x1,
24442 SAVE_INLINE_GPRS = 0x2,
24443 SAVE_INLINE_FPRS = 0x4,
24444 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24445 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24446 SAVE_INLINE_VRS = 0x20,
24447 REST_MULTIPLE = 0x100,
24448 REST_INLINE_GPRS = 0x200,
24449 REST_INLINE_FPRS = 0x400,
24450 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24451 REST_INLINE_VRS = 0x1000
24452 };
24454 static int
24455 rs6000_savres_strategy (rs6000_stack_t *info,
24456 bool using_static_chain_p)
24458 int strategy = 0;
24460 /* Select between in-line and out-of-line save and restore of regs.
24461 First, all the obvious cases where we don't use out-of-line. */
24462 if (crtl->calls_eh_return
24463 || cfun->machine->ra_need_lr)
24464 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24465 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24466 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24468 if (info->first_gp_reg_save == 32)
24469 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24471 if (info->first_fp_reg_save == 64
24472 /* The out-of-line FP routines use double-precision stores;
24473 we can't use those routines if we don't have such stores. */
24474 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24475 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24477 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24478 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24480 /* Define cutoff for using out-of-line functions to save registers. */
24481 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24483 if (!optimize_size)
24485 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24486 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24487 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24489 else
24491 /* Prefer out-of-line restore if it will exit. */
24492 if (info->first_fp_reg_save > 61)
24493 strategy |= SAVE_INLINE_FPRS;
24494 if (info->first_gp_reg_save > 29)
24496 if (info->first_fp_reg_save == 64)
24497 strategy |= SAVE_INLINE_GPRS;
24498 else
24499 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24501 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24502 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24505 else if (DEFAULT_ABI == ABI_DARWIN)
24507 if (info->first_fp_reg_save > 60)
24508 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24509 if (info->first_gp_reg_save > 29)
24510 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24511 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24513 else
24515 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24516 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24517 || info->first_fp_reg_save > 61)
24518 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24519 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24520 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24523 /* Don't bother to try to save things out-of-line if r11 is occupied
24524 by the static chain. It would require too much fiddling and the
24525 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24526 pointer on Darwin, and AIX uses r1 or r12. */
24527 if (using_static_chain_p
24528 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24529 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24530 | SAVE_INLINE_GPRS
24531 | SAVE_INLINE_VRS);
24533 /* Don't ever restore fixed regs. That means we can't use the
24534 out-of-line register restore functions if a fixed reg is in the
24535 range of regs restored. */
24536 if (!(strategy & REST_INLINE_FPRS))
24537 for (int i = info->first_fp_reg_save; i < 64; i++)
24538 if (fixed_regs[i])
24540 strategy |= REST_INLINE_FPRS;
24541 break;
24544 /* We can only use the out-of-line routines to restore fprs if we've
24545 saved all the registers from first_fp_reg_save in the prologue.
24546 Otherwise, we risk loading garbage. Of course, if we have saved
24547 out-of-line then we know we haven't skipped any fprs. */
24548 if ((strategy & SAVE_INLINE_FPRS)
24549 && !(strategy & REST_INLINE_FPRS))
24550 for (int i = info->first_fp_reg_save; i < 64; i++)
24551 if (!save_reg_p (i))
24553 strategy |= REST_INLINE_FPRS;
24554 break;
24557 /* Similarly, for altivec regs. */
24558 if (!(strategy & REST_INLINE_VRS))
24559 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24560 if (fixed_regs[i])
24562 strategy |= REST_INLINE_VRS;
24563 break;
24566 if ((strategy & SAVE_INLINE_VRS)
24567 && !(strategy & REST_INLINE_VRS))
24568 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24569 if (!save_reg_p (i))
24571 strategy |= REST_INLINE_VRS;
24572 break;
24575 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24576 saved is an out-of-line save or restore. Set up the value for
24577 the next test (excluding out-of-line gprs). */
24578 bool lr_save_p = (info->lr_save_p
24579 || !(strategy & SAVE_INLINE_FPRS)
24580 || !(strategy & SAVE_INLINE_VRS)
24581 || !(strategy & REST_INLINE_FPRS)
24582 || !(strategy & REST_INLINE_VRS));
24584 if (TARGET_MULTIPLE
24585 && !TARGET_POWERPC64
24586 && info->first_gp_reg_save < 31
24587 && !(flag_shrink_wrap
24588 && flag_shrink_wrap_separate
24589 && optimize_function_for_speed_p (cfun)))
24591 int count = 0;
24592 for (int i = info->first_gp_reg_save; i < 32; i++)
24593 if (save_reg_p (i))
24594 count++;
24596 if (count <= 1)
24597 /* Don't use store multiple if only one reg needs to be
24598 saved. This can occur for example when the ABI_V4 pic reg
24599 (r30) needs to be saved to make calls, but r31 is not
24600 used. */
24601 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24602 else
24604 /* Prefer store multiple for saves over out-of-line
24605 routines, since the store-multiple instruction will
24606 always be smaller. */
24607 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24609 /* The situation is more complicated with load multiple.
24610 We'd prefer to use the out-of-line routines for restores,
24611 since the "exit" out-of-line routines can handle the
24612 restore of LR and the frame teardown. However, it doesn't
24613 make sense to use the out-of-line routine if that is the
24614 only reason we'd need to save LR, and we can't use the
24615 "exit" out-of-line gpr restore if we have saved some
24616 fprs. In those cases it is advantageous to use load
24617 multiple when available. */
24618 if (info->first_fp_reg_save != 64 || !lr_save_p)
24619 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24623 /* Using the "exit" out-of-line routine does not improve code size
24624 if using it would require lr to be saved and if only saving one
24625 or two gprs. */
24626 else if (!lr_save_p && info->first_gp_reg_save > 29)
24627 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24629 /* Don't ever restore fixed regs. */
24630 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24631 for (int i = info->first_gp_reg_save; i < 32; i++)
24632 if (fixed_reg_p (i))
24634 strategy |= REST_INLINE_GPRS;
24635 strategy &= ~REST_MULTIPLE;
24636 break;
24639 /* We can only use load multiple or the out-of-line routines to
24640 restore gprs if we've saved all the registers from
24641 first_gp_reg_save. Otherwise, we risk loading garbage.
24642 Of course, if we have saved out-of-line or used stmw then we know
24643 we haven't skipped any gprs. */
24644 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24645 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24646 for (int i = info->first_gp_reg_save; i < 32; i++)
24647 if (!save_reg_p (i))
24649 strategy |= REST_INLINE_GPRS;
24650 strategy &= ~REST_MULTIPLE;
24651 break;
24654 if (TARGET_ELF && TARGET_64BIT)
24656 if (!(strategy & SAVE_INLINE_FPRS))
24657 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24658 else if (!(strategy & SAVE_INLINE_GPRS)
24659 && info->first_fp_reg_save == 64)
24660 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24662 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24663 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24665 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24666 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24668 return strategy;
24671 /* Calculate the stack information for the current function. This is
24672 complicated by having two separate calling sequences, the AIX calling
24673 sequence and the V.4 calling sequence.
24675 AIX (and Darwin/Mac OS X) stack frames look like:
24676 32-bit 64-bit
24677 SP----> +---------------------------------------+
24678 | back chain to caller | 0 0
24679 +---------------------------------------+
24680 | saved CR | 4 8 (8-11)
24681 +---------------------------------------+
24682 | saved LR | 8 16
24683 +---------------------------------------+
24684 | reserved for compilers | 12 24
24685 +---------------------------------------+
24686 | reserved for binders | 16 32
24687 +---------------------------------------+
24688 | saved TOC pointer | 20 40
24689 +---------------------------------------+
24690 | Parameter save area (+padding*) (P) | 24 48
24691 +---------------------------------------+
24692 | Alloca space (A) | 24+P etc.
24693 +---------------------------------------+
24694 | Local variable space (L) | 24+P+A
24695 +---------------------------------------+
24696 | Float/int conversion temporary (X) | 24+P+A+L
24697 +---------------------------------------+
24698 | Save area for AltiVec registers (W) | 24+P+A+L+X
24699 +---------------------------------------+
24700 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24701 +---------------------------------------+
24702 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24703 +---------------------------------------+
24704 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24705 +---------------------------------------+
24706 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24707 +---------------------------------------+
24708 old SP->| back chain to caller's caller |
24709 +---------------------------------------+
24711 * If the alloca area is present, the parameter save area is
24712 padded so that the alloca area starts 16-byte aligned.
24714 The required alignment for AIX configurations is two words (i.e., 8
24715 or 16 bytes).
24717 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24719 SP----> +---------------------------------------+
24720 | Back chain to caller | 0
24721 +---------------------------------------+
24722 | Save area for CR | 8
24723 +---------------------------------------+
24724 | Saved LR | 16
24725 +---------------------------------------+
24726 | Saved TOC pointer | 24
24727 +---------------------------------------+
24728 | Parameter save area (+padding*) (P) | 32
24729 +---------------------------------------+
24730 | Alloca space (A) | 32+P
24731 +---------------------------------------+
24732 | Local variable space (L) | 32+P+A
24733 +---------------------------------------+
24734 | Save area for AltiVec registers (W) | 32+P+A+L
24735 +---------------------------------------+
24736 | AltiVec alignment padding (Y) | 32+P+A+L+W
24737 +---------------------------------------+
24738 | Save area for GP registers (G) | 32+P+A+L+W+Y
24739 +---------------------------------------+
24740 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24741 +---------------------------------------+
24742 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24743 +---------------------------------------+
24745 * If the alloca area is present, the parameter save area is
24746 padded so that the alloca area starts 16-byte aligned.
24748 V.4 stack frames look like:
24750 SP----> +---------------------------------------+
24751 | back chain to caller | 0
24752 +---------------------------------------+
24753 | caller's saved LR | 4
24754 +---------------------------------------+
24755 | Parameter save area (+padding*) (P) | 8
24756 +---------------------------------------+
24757 | Alloca space (A) | 8+P
24758 +---------------------------------------+
24759 | Varargs save area (V) | 8+P+A
24760 +---------------------------------------+
24761 | Local variable space (L) | 8+P+A+V
24762 +---------------------------------------+
24763 | Float/int conversion temporary (X) | 8+P+A+V+L
24764 +---------------------------------------+
24765 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24766 +---------------------------------------+
24767 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24768 +---------------------------------------+
24769 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24770 +---------------------------------------+
24771 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24772 +---------------------------------------+
24773 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24774 +---------------------------------------+
24775 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24776 +---------------------------------------+
24777 old SP->| back chain to caller's caller |
24778 +---------------------------------------+
24780 * If the alloca area is present and the required alignment is
24781 16 bytes, the parameter save area is padded so that the
24782 alloca area starts 16-byte aligned.
24784 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24785 given. (But note below and in sysv4.h that we require only 8 and
24786 may round up the size of our stack frame anyway. The historical
24787 reason is early versions of powerpc-linux which didn't properly
24788 align the stack at program startup. A happy side-effect is that
24789 -mno-eabi libraries can be used with -meabi programs.)
24791 The EABI configuration defaults to the V.4 layout. However,
24792 the stack alignment requirements may differ. If -mno-eabi is not
24793 given, the required stack alignment is 8 bytes; if -mno-eabi is
24794 given, the required alignment is 16 bytes. (But see V.4 comment
24795 above.) */
24797 #ifndef ABI_STACK_BOUNDARY
24798 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24799 #endif
24801 static rs6000_stack_t *
24802 rs6000_stack_info (void)
24804 /* We should never be called for thunks, we are not set up for that. */
24805 gcc_assert (!cfun->is_thunk);
24807 rs6000_stack_t *info = &stack_info;
24808 int reg_size = TARGET_32BIT ? 4 : 8;
24809 int ehrd_size;
24810 int ehcr_size;
24811 int save_align;
24812 int first_gp;
24813 HOST_WIDE_INT non_fixed_size;
24814 bool using_static_chain_p;
24816 if (reload_completed && info->reload_completed)
24817 return info;
24819 memset (info, 0, sizeof (*info));
24820 info->reload_completed = reload_completed;
24822 /* Select which calling sequence. */
24823 info->abi = DEFAULT_ABI;
24825 /* Calculate which registers need to be saved & save area size. */
24826 info->first_gp_reg_save = first_reg_to_save ();
24827 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24828 even if it currently looks like we won't. Reload may need it to
24829 get at a constant; if so, it will have already created a constant
24830 pool entry for it. */
24831 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24832 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24833 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24834 && crtl->uses_const_pool
24835 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24836 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24837 else
24838 first_gp = info->first_gp_reg_save;
24840 info->gp_size = reg_size * (32 - first_gp);
24842 info->first_fp_reg_save = first_fp_reg_to_save ();
24843 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24845 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24846 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24847 - info->first_altivec_reg_save);
24849 /* Does this function call anything? */
24850 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24852 /* Determine if we need to save the condition code registers. */
24853 if (save_reg_p (CR2_REGNO)
24854 || save_reg_p (CR3_REGNO)
24855 || save_reg_p (CR4_REGNO))
24857 info->cr_save_p = 1;
24858 if (DEFAULT_ABI == ABI_V4)
24859 info->cr_size = reg_size;
24862 /* If the current function calls __builtin_eh_return, then we need
24863 to allocate stack space for registers that will hold data for
24864 the exception handler. */
24865 if (crtl->calls_eh_return)
24867 unsigned int i;
24868 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24869 continue;
24871 ehrd_size = i * UNITS_PER_WORD;
24873 else
24874 ehrd_size = 0;
24876 /* In the ELFv2 ABI, we also need to allocate space for separate
24877 CR field save areas if the function calls __builtin_eh_return. */
24878 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24880 /* This hard-codes that we have three call-saved CR fields. */
24881 ehcr_size = 3 * reg_size;
24882 /* We do *not* use the regular CR save mechanism. */
24883 info->cr_save_p = 0;
24885 else
24886 ehcr_size = 0;
24888 /* Determine various sizes. */
24889 info->reg_size = reg_size;
24890 info->fixed_size = RS6000_SAVE_AREA;
24891 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24892 if (cfun->calls_alloca)
24893 info->parm_size =
24894 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24895 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24896 else
24897 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24898 TARGET_ALTIVEC ? 16 : 8);
24899 if (FRAME_GROWS_DOWNWARD)
24900 info->vars_size
24901 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24902 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24903 - (info->fixed_size + info->vars_size + info->parm_size);
24905 if (TARGET_ALTIVEC_ABI)
24906 info->vrsave_mask = compute_vrsave_mask ();
24908 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24909 info->vrsave_size = 4;
24911 compute_save_world_info (info);
24913 /* Calculate the offsets. */
24914 switch (DEFAULT_ABI)
24916 case ABI_NONE:
24917 default:
24918 gcc_unreachable ();
24920 case ABI_AIX:
24921 case ABI_ELFv2:
24922 case ABI_DARWIN:
24923 info->fp_save_offset = -info->fp_size;
24924 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24926 if (TARGET_ALTIVEC_ABI)
24928 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24930 /* Align stack so vector save area is on a quadword boundary.
24931 The padding goes above the vectors. */
24932 if (info->altivec_size != 0)
24933 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24935 info->altivec_save_offset = info->vrsave_save_offset
24936 - info->altivec_padding_size
24937 - info->altivec_size;
24938 gcc_assert (info->altivec_size == 0
24939 || info->altivec_save_offset % 16 == 0);
24941 /* Adjust for AltiVec case. */
24942 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24944 else
24945 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24947 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24948 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24949 info->lr_save_offset = 2*reg_size;
24950 break;
24952 case ABI_V4:
24953 info->fp_save_offset = -info->fp_size;
24954 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24955 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24957 if (TARGET_ALTIVEC_ABI)
24959 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24961 /* Align stack so vector save area is on a quadword boundary. */
24962 if (info->altivec_size != 0)
24963 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24965 info->altivec_save_offset = info->vrsave_save_offset
24966 - info->altivec_padding_size
24967 - info->altivec_size;
24969 /* Adjust for AltiVec case. */
24970 info->ehrd_offset = info->altivec_save_offset;
24972 else
24973 info->ehrd_offset = info->cr_save_offset;
24975 info->ehrd_offset -= ehrd_size;
24976 info->lr_save_offset = reg_size;
24979 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24980 info->save_size = RS6000_ALIGN (info->fp_size
24981 + info->gp_size
24982 + info->altivec_size
24983 + info->altivec_padding_size
24984 + ehrd_size
24985 + ehcr_size
24986 + info->cr_size
24987 + info->vrsave_size,
24988 save_align);
24990 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24992 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24993 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24995 /* Determine if we need to save the link register. */
24996 if (info->calls_p
24997 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24998 && crtl->profile
24999 && !TARGET_PROFILE_KERNEL)
25000 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
25001 #ifdef TARGET_RELOCATABLE
25002 || (DEFAULT_ABI == ABI_V4
25003 && (TARGET_RELOCATABLE || flag_pic > 1)
25004 && !constant_pool_empty_p ())
25005 #endif
25006 || rs6000_ra_ever_killed ())
25007 info->lr_save_p = 1;
25009 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
25010 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
25011 && call_used_regs[STATIC_CHAIN_REGNUM]);
25012 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
25014 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
25015 || !(info->savres_strategy & SAVE_INLINE_FPRS)
25016 || !(info->savres_strategy & SAVE_INLINE_VRS)
25017 || !(info->savres_strategy & REST_INLINE_GPRS)
25018 || !(info->savres_strategy & REST_INLINE_FPRS)
25019 || !(info->savres_strategy & REST_INLINE_VRS))
25020 info->lr_save_p = 1;
25022 if (info->lr_save_p)
25023 df_set_regs_ever_live (LR_REGNO, true);
25025 /* Determine if we need to allocate any stack frame:
25027 For AIX we need to push the stack if a frame pointer is needed
25028 (because the stack might be dynamically adjusted), if we are
25029 debugging, if we make calls, or if the sum of fp_save, gp_save,
25030 and local variables are more than the space needed to save all
25031 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
25032 + 18*8 = 288 (GPR13 reserved).
25034 For V.4 we don't have the stack cushion that AIX uses, but assume
25035 that the debugger can handle stackless frames. */
25037 if (info->calls_p)
25038 info->push_p = 1;
25040 else if (DEFAULT_ABI == ABI_V4)
25041 info->push_p = non_fixed_size != 0;
25043 else if (frame_pointer_needed)
25044 info->push_p = 1;
25046 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
25047 info->push_p = 1;
25049 else
25050 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
25052 return info;
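/* Worked example (editorial, not part of the original source): an ELFv2
   function that saves f30-f31 and r30-r31 and nothing else gets

       fp_size = 8 * (64 - 62) = 16,  fp_save_offset = -16
       gp_size = 8 * (32 - 30) = 16,  gp_save_offset = -32

   Offsets are relative to the top of the frame (the incoming SP), with
   the FPR save area closest to it, matching the frame diagrams above.  */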
25055 static void
25056 debug_stack_info (rs6000_stack_t *info)
25058 const char *abi_string;
25060 if (! info)
25061 info = rs6000_stack_info ();
25063 fprintf (stderr, "\nStack information for function %s:\n",
25064 ((current_function_decl && DECL_NAME (current_function_decl))
25065 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
25066 : "<unknown>"));
25068 switch (info->abi)
25070 default: abi_string = "Unknown"; break;
25071 case ABI_NONE: abi_string = "NONE"; break;
25072 case ABI_AIX: abi_string = "AIX"; break;
25073 case ABI_ELFv2: abi_string = "ELFv2"; break;
25074 case ABI_DARWIN: abi_string = "Darwin"; break;
25075 case ABI_V4: abi_string = "V.4"; break;
25078 fprintf (stderr, "\tABI = %5s\n", abi_string);
25080 if (TARGET_ALTIVEC_ABI)
25081 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
25083 if (info->first_gp_reg_save != 32)
25084 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
25086 if (info->first_fp_reg_save != 64)
25087 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
25089 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
25090 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
25091 info->first_altivec_reg_save);
25093 if (info->lr_save_p)
25094 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
25096 if (info->cr_save_p)
25097 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
25099 if (info->vrsave_mask)
25100 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
25102 if (info->push_p)
25103 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
25105 if (info->calls_p)
25106 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
25108 if (info->gp_size)
25109 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
25111 if (info->fp_size)
25112 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
25114 if (info->altivec_size)
25115 fprintf (stderr, "\taltivec_save_offset = %5d\n",
25116 info->altivec_save_offset);
25118 if (info->vrsave_size)
25119 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25120 info->vrsave_save_offset);
25122 if (info->lr_save_p)
25123 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25125 if (info->cr_save_p)
25126 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25128 if (info->varargs_save_offset)
25129 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25131 if (info->total_size)
25132 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25133 info->total_size);
25135 if (info->vars_size)
25136 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25137 info->vars_size);
25139 if (info->parm_size)
25140 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25142 if (info->fixed_size)
25143 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25145 if (info->gp_size)
25146 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25148 if (info->fp_size)
25149 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25151 if (info->altivec_size)
25152 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25154 if (info->vrsave_size)
25155 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25157 if (info->altivec_padding_size)
25158 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25159 info->altivec_padding_size);
25161 if (info->cr_size)
25162 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25164 if (info->save_size)
25165 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25167 if (info->reg_size != 4)
25168 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25170 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25172 fprintf (stderr, "\n");
25175 rtx
25176 rs6000_return_addr (int count, rtx frame)
25178 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25179 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25180 if (count != 0
25181 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25183 cfun->machine->ra_needs_full_frame = 1;
25185 if (count == 0)
25186 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25187 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25188 frame = stack_pointer_rtx;
25189 rtx prev_frame_addr = memory_address (Pmode, frame);
25190 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25191 rtx lr_save_off = plus_constant (Pmode,
25192 prev_frame, RETURN_ADDRESS_OFFSET);
25193 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25194 return gen_rtx_MEM (Pmode, lr_save_addr);
25197 cfun->machine->ra_need_lr = 1;
25198 return get_hard_reg_initial_val (Pmode, LR_REGNO);
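/* Illustrative sketch (added commentary, not from the original source):
   for __builtin_return_address (1), the count != 0 path above walks one
   link of the back chain and then loads the LR save word, roughly:

       ld r9,0(r1)                       # previous frame via back chain
       ld r3,<RETURN_ADDRESS_OFFSET>(r9) # saved LR in that frame

   The exact offset is the ABI's RETURN_ADDRESS_OFFSET.  */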
25201 /* Say whether a function is a candidate for sibcall handling or not. */
25203 static bool
25204 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25206 tree fntype;
25208 if (decl)
25209 fntype = TREE_TYPE (decl);
25210 else
25211 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25213 /* We can't do it if the called function has more vector parameters
25214 than the current function; there's nowhere to put the VRsave code. */
25215 if (TARGET_ALTIVEC_ABI
25216 && TARGET_ALTIVEC_VRSAVE
25217 && !(decl && decl == current_function_decl))
25219 function_args_iterator args_iter;
25220 tree type;
25221 int nvreg = 0;
25223 /* Functions with vector parameters are required to have a
25224 prototype, so the argument type info must be available
25225 here. */
25226 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25227 if (TREE_CODE (type) == VECTOR_TYPE
25228 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25229 nvreg++;
25231 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25232 if (TREE_CODE (type) == VECTOR_TYPE
25233 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25234 nvreg--;
25236 if (nvreg > 0)
25237 return false;
25240 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25241 functions, because the callee may have a different TOC pointer from
25242 the caller and there's no way to ensure we restore the TOC when
25243 we return. With the secure-plt SYSV ABI we can't make non-local
25244 calls when -fpic/PIC because the plt call stubs use r30. */
25245 if (DEFAULT_ABI == ABI_DARWIN
25246 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25247 && decl
25248 && !DECL_EXTERNAL (decl)
25249 && !DECL_WEAK (decl)
25250 && (*targetm.binds_local_p) (decl))
25251 || (DEFAULT_ABI == ABI_V4
25252 && (!TARGET_SECURE_PLT
25253 || !flag_pic
25254 || (decl
25255 && (*targetm.binds_local_p) (decl)))))
25257 tree attr_list = TYPE_ATTRIBUTES (fntype);
25259 if (!lookup_attribute ("longcall", attr_list)
25260 || lookup_attribute ("shortcall", attr_list))
25261 return true;
25264 return false;
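/* Worked examples (illustrative only): under ELFv2, a call to a static
   function defined in the same translation unit binds locally and can be
   a sibcall, while a call to an external function is rejected above
   because its TOC pointer may differ.  Under 32-bit SVR4 with -fPIC and
   a secure PLT, only calls that bind locally qualify, since the PLT call
   stubs use r30.  */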
25267 static int
25268 rs6000_ra_ever_killed (void)
25270 rtx_insn *top;
25271 rtx reg;
25272 rtx_insn *insn;
25274 if (cfun->is_thunk)
25275 return 0;
25277 if (cfun->machine->lr_save_state)
25278 return cfun->machine->lr_save_state - 1;
25280 /* regs_ever_live has LR marked as used if any sibcalls are present,
25281 but this should not force saving and restoring in the
25282 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25283 clobbers LR, so that is inappropriate. */
25285 /* Also, the prologue can generate a store into LR that
25286 doesn't really count, like this:
25288 move LR->R0
25289 bcl to set PIC register
25290 move LR->R31
25291 move R0->LR
25293 When we're called from the epilogue, we need to avoid counting
25294 this as a store. */
25296 push_topmost_sequence ();
25297 top = get_insns ();
25298 pop_topmost_sequence ();
25299 reg = gen_rtx_REG (Pmode, LR_REGNO);
25301 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25303 if (INSN_P (insn))
25305 if (CALL_P (insn))
25307 if (!SIBLING_CALL_P (insn))
25308 return 1;
25310 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25311 return 1;
25312 else if (set_of (reg, insn) != NULL_RTX
25313 && !prologue_epilogue_contains (insn))
25314 return 1;
25317 return 0;
25320 /* Emit instructions needed to load the TOC register.
25321 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
25322 there is a constant pool, or for SVR4 -fpic. */
25324 void
25325 rs6000_emit_load_toc_table (int fromprolog)
25327 rtx dest;
25328 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25330 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25332 char buf[30];
25333 rtx lab, tmp1, tmp2, got;
25335 lab = gen_label_rtx ();
25336 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25337 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25338 if (flag_pic == 2)
25340 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25341 need_toc_init = 1;
25343 else
25344 got = rs6000_got_sym ();
25345 tmp1 = tmp2 = dest;
25346 if (!fromprolog)
25348 tmp1 = gen_reg_rtx (Pmode);
25349 tmp2 = gen_reg_rtx (Pmode);
25351 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25352 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25353 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25354 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25356 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25358 emit_insn (gen_load_toc_v4_pic_si ());
25359 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25361 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25363 char buf[30];
25364 rtx temp0 = (fromprolog
25365 ? gen_rtx_REG (Pmode, 0)
25366 : gen_reg_rtx (Pmode));
25368 if (fromprolog)
25370 rtx symF, symL;
25372 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25373 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25375 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25376 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25378 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25379 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25380 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25382 else
25384 rtx tocsym, lab;
25386 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25387 need_toc_init = 1;
25388 lab = gen_label_rtx ();
25389 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25390 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25391 if (TARGET_LINK_STACK)
25392 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25393 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25395 emit_insn (gen_addsi3 (dest, temp0, dest));
25397 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25399 /* This is for AIX code running in non-PIC ELF32. */
25400 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25402 need_toc_init = 1;
25403 emit_insn (gen_elf_high (dest, realsym));
25404 emit_insn (gen_elf_low (dest, dest, realsym));
25406 else
25408 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25410 if (TARGET_32BIT)
25411 emit_insn (gen_load_toc_aix_si (dest));
25412 else
25413 emit_insn (gen_load_toc_aix_di (dest));
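/* Illustrative sketch (an approximation, not from the original source):
   the 32-bit SVR4 flag_pic == 2 non-prologue path above expands to
   something along the lines of

       bcl 20,31,.+8        # LR <- address of the data word below
       .long .LCTOC1-.      # displacement from here to the TOC base
       mflr r30             # dest <- LR
       lwz r0,0(r30)        # temp0 <- displacement
       add r30,r0,r30       # r30 now addresses the TOC

   with the precise labels and link-stack variants produced by the
   load_toc_* patterns in rs6000.md.  */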
25417 /* Emit instructions to restore the link register after determining where
25418 its value has been stored. */
25420 void
25421 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25423 rs6000_stack_t *info = rs6000_stack_info ();
25424 rtx operands[2];
25426 operands[0] = source;
25427 operands[1] = scratch;
25429 if (info->lr_save_p)
25431 rtx frame_rtx = stack_pointer_rtx;
25432 HOST_WIDE_INT sp_offset = 0;
25433 rtx tmp;
25435 if (frame_pointer_needed
25436 || cfun->calls_alloca
25437 || info->total_size > 32767)
25439 tmp = gen_frame_mem (Pmode, frame_rtx);
25440 emit_move_insn (operands[1], tmp);
25441 frame_rtx = operands[1];
25443 else if (info->push_p)
25444 sp_offset = info->total_size;
25446 tmp = plus_constant (Pmode, frame_rtx,
25447 info->lr_save_offset + sp_offset);
25448 tmp = gen_frame_mem (Pmode, tmp);
25449 emit_move_insn (tmp, operands[0]);
25451 else
25452 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25454 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25455 state of lr_save_p so any change from here on would be a bug. In
25456 particular, stop rs6000_ra_ever_killed from considering the SET
25457 of lr we may have added just above. */
25458 cfun->machine->lr_save_state = info->lr_save_p + 1;
25461 static GTY(()) alias_set_type set = -1;
25463 alias_set_type
25464 get_TOC_alias_set (void)
25466 if (set == -1)
25467 set = new_alias_set ();
25468 return set;
25471 /* This returns nonzero if the current function uses the TOC. This is
25472 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25473 is generated by the ABI_V4 load_toc_* patterns.
25474 Return 2 instead of 1 if the load_toc_* pattern is in the function
25475 partition that doesn't start the function. */
25476 #if TARGET_ELF
25477 static int
25478 uses_TOC (void)
25480 rtx_insn *insn;
25481 int ret = 1;
25483 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25485 if (INSN_P (insn))
25487 rtx pat = PATTERN (insn);
25488 int i;
25490 if (GET_CODE (pat) == PARALLEL)
25491 for (i = 0; i < XVECLEN (pat, 0); i++)
25493 rtx sub = XVECEXP (pat, 0, i);
25494 if (GET_CODE (sub) == USE)
25496 sub = XEXP (sub, 0);
25497 if (GET_CODE (sub) == UNSPEC
25498 && XINT (sub, 1) == UNSPEC_TOC)
25499 return ret;
25503 else if (crtl->has_bb_partition
25504 && NOTE_P (insn)
25505 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25506 ret = 2;
25508 return 0;
25510 #endif
25512 rtx
25513 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25515 rtx tocrel, tocreg, hi;
25517 if (TARGET_DEBUG_ADDR)
25519 if (GET_CODE (symbol) == SYMBOL_REF)
25520 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25521 XSTR (symbol, 0));
25522 else
25524 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25525 GET_RTX_NAME (GET_CODE (symbol)));
25526 debug_rtx (symbol);
25530 if (!can_create_pseudo_p ())
25531 df_set_regs_ever_live (TOC_REGISTER, true);
25533 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25534 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25535 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25536 return tocrel;
25538 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25539 if (largetoc_reg != NULL)
25541 emit_move_insn (largetoc_reg, hi);
25542 hi = largetoc_reg;
25544 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
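/* Example (an assumption about the resulting assembly): with
   -mcmodel=small the UNSPEC_TOCREL returned above typically prints as a
   single TOC-relative access such as

       ld r9,var@toc(r2)

   whereas the HIGH/LO_SUM pair built for the larger code models splits
   the reference:

       addis r9,r2,var@toc@ha
       ld r9,var@toc@l(r9)

   "var" is a placeholder symbol for this sketch only.  */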
25547 /* Issue assembly directives that create a reference to the given DWARF
25548 FRAME_TABLE_LABEL from the current function section. */
25549 void
25550 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25552 fprintf (asm_out_file, "\t.ref %s\n",
25553 (* targetm.strip_name_encoding) (frame_table_label));
25556 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25557 and the change to the stack pointer. */
25559 static void
25560 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25562 rtvec p;
25563 int i;
25564 rtx regs[3];
25566 i = 0;
25567 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25568 if (hard_frame_needed)
25569 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25570 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25571 || (hard_frame_needed
25572 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25573 regs[i++] = fp;
25575 p = rtvec_alloc (i);
25576 while (--i >= 0)
25578 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25579 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25582 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25585 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25586 and set the appropriate attributes for the generated insn. Return the
25587 first insn which adjusts the stack pointer or the last insn before
25588 the stack adjustment loop.
25590 SIZE_INT is used to create the CFI note for the allocation.
25592 The internally computed SIZE_RTX holds the size of the adjustment; note
25593 that since stacks grow to lower addresses its runtime value is -SIZE_INT.
25595 ORIG_SP contains the backchain value that must be stored at *sp. */
25597 static rtx_insn *
25598 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25600 rtx_insn *insn;
25602 rtx size_rtx = GEN_INT (-size_int);
25603 if (size_int > 32767)
25605 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25606 /* Need a note here so that try_split doesn't get confused. */
25607 if (get_last_insn () == NULL_RTX)
25608 emit_note (NOTE_INSN_DELETED);
25609 insn = emit_move_insn (tmp_reg, size_rtx);
25610 try_split (PATTERN (insn), insn, 0);
25611 size_rtx = tmp_reg;
25614 if (Pmode == SImode)
25615 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25616 stack_pointer_rtx,
25617 size_rtx,
25618 orig_sp));
25619 else
25620 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25621 stack_pointer_rtx,
25622 size_rtx,
25623 orig_sp));
25624 rtx par = PATTERN (insn);
25625 gcc_assert (GET_CODE (par) == PARALLEL);
25626 rtx set = XVECEXP (par, 0, 0);
25627 gcc_assert (GET_CODE (set) == SET);
25628 rtx mem = SET_DEST (set);
25629 gcc_assert (MEM_P (mem));
25630 MEM_NOTRAP_P (mem) = 1;
25631 set_mem_alias_set (mem, get_frame_alias_set ());
25633 RTX_FRAME_RELATED_P (insn) = 1;
25634 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25635 gen_rtx_SET (stack_pointer_rtx,
25636 gen_rtx_PLUS (Pmode,
25637 stack_pointer_rtx,
25638 GEN_INT (-size_int))));
25640 /* Emit a blockage to ensure the allocation/probing insns are
25641 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25642 note for similar reasons. */
25643 if (flag_stack_clash_protection)
25645 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25646 emit_insn (gen_blockage ());
25649 return insn;
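/* A worked example (added, not from the original source): on 64-bit, a
   small frame of 256 bytes allocates with a single

       stdu r1,-256(r1)       # store back chain and decrement sp

   while a 40000-byte frame takes the size > 32767 path above:

       lis r0,-1
       ori r0,r0,0x63c0       # r0 = -40000
       stdux r1,r1,r0         # store back chain and decrement sp

   Both forms store the old stack pointer at the new *sp.  */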
25652 static HOST_WIDE_INT
25653 get_stack_clash_protection_probe_interval (void)
25655 return (HOST_WIDE_INT_1U
25656 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25659 static HOST_WIDE_INT
25660 get_stack_clash_protection_guard_size (void)
25662 return (HOST_WIDE_INT_1U
25663 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
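/* For example, assuming the usual parameter defaults of 12, both helpers
   above return 1 << 12 = 4096 bytes: probe once per page and assume a
   one-page guard.  Both values can be tuned with
   --param stack-clash-protection-probe-interval and
   --param stack-clash-protection-guard-size.  */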
25666 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25667 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25669 COPY_REG, if non-null, should contain a copy of the original
25670 stack pointer at exit from this function.
25672 This is subtly different from the Ada probing in that it tries hard to
25673 prevent attacks that jump the stack guard. Thus it is never allowed to
25674 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25675 space without a suitable probe. */
25676 static rtx_insn *
25677 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25678 rtx copy_reg)
25680 rtx orig_sp = copy_reg;
25682 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25684 /* Round the size down to a multiple of PROBE_INTERVAL. */
25685 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25687 /* If a copy was explicitly requested,
25688 or the rounded size is not the same as the original size,
25689 or the rounded size is greater than a page,
25690 then we will need a copy of the original stack pointer. */
25691 if (rounded_size != orig_size
25692 || rounded_size > probe_interval
25693 || copy_reg)
25695 /* If the caller did not request a copy of the incoming stack
25696 pointer, then we use r0 to hold the copy. */
25697 if (!copy_reg)
25698 orig_sp = gen_rtx_REG (Pmode, 0);
25699 emit_move_insn (orig_sp, stack_pointer_rtx);
25702 /* There are three cases here.
25704 The first is a single probe, which is the most common and most
25705 efficiently implemented, as it does not need a copy of the original
25706 stack pointer when there are no residuals.
25708 The second is unrolled allocation/probes, which we use when there are
25709 just a few of them. It needs to save the original stack pointer into
25710 a temporary for use as a source register in the allocation/probe.
25712 Last is a loop. This is the most uncommon case and the least efficient. */
25713 rtx_insn *retval = NULL;
25714 if (rounded_size == probe_interval)
25716 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25718 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25720 else if (rounded_size <= 8 * probe_interval)
25722 /* The ABI requires using the store with update insns to allocate
25723 space and store the backchain into the stack.
25725 So we save the current stack pointer into a temporary, then
25726 emit the store-with-update insns to store the saved stack pointer
25727 into the right location in each new page. */
25728 for (int i = 0; i < rounded_size; i += probe_interval)
25730 rtx_insn *insn
25731 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25733 /* Save the first stack adjustment in RETVAL. */
25734 if (i == 0)
25735 retval = insn;
25738 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25740 else
25742 /* Compute the ending address. */
25743 rtx end_addr
25744 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25745 rtx rs = GEN_INT (-rounded_size);
25746 rtx_insn *insn;
25747 if (add_operand (rs, Pmode))
25748 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25749 else
25751 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25752 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25753 stack_pointer_rtx));
25754 /* Describe the effect of INSN to the CFI engine. */
25755 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25756 gen_rtx_SET (end_addr,
25757 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25758 rs)));
25760 RTX_FRAME_RELATED_P (insn) = 1;
25762 /* Emit the loop. */
25763 if (TARGET_64BIT)
25764 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25765 stack_pointer_rtx, orig_sp,
25766 end_addr));
25767 else
25768 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25769 stack_pointer_rtx, orig_sp,
25770 end_addr));
25771 RTX_FRAME_RELATED_P (retval) = 1;
25772 /* Describe the effect of INSN to the CFI engine. */
25773 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25774 gen_rtx_SET (stack_pointer_rtx, end_addr));
25776 /* Emit a blockage to ensure the allocation/probing insns are
25777 not optimized, combined, removed, etc. Other cases handle this
25778 within their call to rs6000_emit_allocate_stack_1. */
25779 emit_insn (gen_blockage ());
25781 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25784 if (orig_size != rounded_size)
25786 /* Allocate (and implicitly probe) any residual space. */
25787 HOST_WIDE_INT residual = orig_size - rounded_size;
25789 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25791 /* If the residual was the only allocation, then we can return the
25792 allocating insn. */
25793 if (!retval)
25794 retval = insn;
25797 return retval;
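/* A worked example (illustrative only): with a 4096-byte probe interval,
   a request for 9000 bytes rounds down to 8192, which lands in the
   unrolled case above.  The old stack pointer is copied (to r0 when the
   caller supplied no COPY_REG), two store-with-update allocations of
   4096 bytes each are emitted, and the residual 808 bytes are allocated
   (and thereby probed) by the final rs6000_emit_allocate_stack_1
   call.  */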
25800 /* Emit the correct code for allocating stack space, as insns.
25801 If COPY_REG, make sure a copy of the old frame is left there.
25802 The generated code may use hard register 0 as a temporary. */
25804 static rtx_insn *
25805 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25807 rtx_insn *insn;
25808 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25809 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25810 rtx todec = gen_int_mode (-size, Pmode);
25812 if (INTVAL (todec) != -size)
25814 warning (0, "stack frame too large");
25815 emit_insn (gen_trap ());
25816 return 0;
25819 if (crtl->limit_stack)
25821 if (REG_P (stack_limit_rtx)
25822 && REGNO (stack_limit_rtx) > 1
25823 && REGNO (stack_limit_rtx) <= 31)
25825 rtx_insn *insn
25826 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25827 gcc_assert (insn);
25828 emit_insn (insn);
25829 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25831 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25832 && TARGET_32BIT
25833 && DEFAULT_ABI == ABI_V4
25834 && !flag_pic)
25836 rtx toload = gen_rtx_CONST (VOIDmode,
25837 gen_rtx_PLUS (Pmode,
25838 stack_limit_rtx,
25839 GEN_INT (size)));
25841 emit_insn (gen_elf_high (tmp_reg, toload));
25842 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25843 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25844 const0_rtx));
25846 else
25847 warning (0, "stack limit expression is not supported");
25850 if (flag_stack_clash_protection)
25852 if (size < get_stack_clash_protection_guard_size ())
25853 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25854 else
25856 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25857 copy_reg);
25859 /* If we asked for a copy with an offset, then we still need to add in
25860 the offset. */
25861 if (copy_reg && copy_off)
25862 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25863 return insn;
25867 if (copy_reg)
25869 if (copy_off != 0)
25870 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25871 else
25872 emit_move_insn (copy_reg, stack_reg);
25875 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25876 it now and set the alias set/attributes. The above gen_*_update
25877 calls will generate a PARALLEL with the MEM set being the first
25878 operation. */
25879 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25880 return insn;
25883 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25885 #if PROBE_INTERVAL > 32768
25886 #error Cannot use indexed addressing mode for stack probing
25887 #endif
25889 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25890 inclusive. These are offsets from the current stack pointer. */
25892 static void
25893 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25895 /* See if we have a constant small number of probes to generate. If so,
25896 that's the easy case. */
25897 if (first + size <= 32768)
25899 HOST_WIDE_INT i;
25901 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25902 it exceeds SIZE. If only one probe is needed, this will not
25903 generate any code. Then probe at FIRST + SIZE. */
25904 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25905 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25906 -(first + i)));
25908 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25909 -(first + size)));
25912 /* Otherwise, do the same as above, but in a loop. Note that we must be
25913 extra careful with variables wrapping around because we might be at
25914 the very top (or the very bottom) of the address space and we have
25915 to be able to handle this case properly; in particular, we use an
25916 equality test for the loop condition. */
25917 else
25919 HOST_WIDE_INT rounded_size;
25920 rtx r12 = gen_rtx_REG (Pmode, 12);
25921 rtx r0 = gen_rtx_REG (Pmode, 0);
25923 /* Sanity check for the addressing mode we're going to use. */
25924 gcc_assert (first <= 32768);
25926 /* Step 1: round SIZE to the previous multiple of the interval. */
25928 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25931 /* Step 2: compute initial and final value of the loop counter. */
25933 /* TEST_ADDR = SP + FIRST. */
25934 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25935 -first)));
25937 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25938 if (rounded_size > 32768)
25940 emit_move_insn (r0, GEN_INT (-rounded_size));
25941 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25943 else
25944 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25945 -rounded_size)));
25948 /* Step 3: the loop
25950 do
25951 {
25952 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25953 probe at TEST_ADDR
25954 }
25955 while (TEST_ADDR != LAST_ADDR)
25957 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25958 until it is equal to ROUNDED_SIZE. */
25960 if (TARGET_64BIT)
25961 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25962 else
25963 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25966 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25967 that SIZE is equal to ROUNDED_SIZE. */
25969 if (size != rounded_size)
25970 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25974 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25975 addresses, not offsets. */
25977 static const char *
25978 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25980 static int labelno = 0;
25981 char loop_lab[32];
25982 rtx xops[2];
25984 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25986 /* Loop. */
25987 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25989 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25990 xops[0] = reg1;
25991 xops[1] = GEN_INT (-PROBE_INTERVAL);
25992 output_asm_insn ("addi %0,%0,%1", xops);
25994 /* Probe at TEST_ADDR. */
25995 xops[1] = gen_rtx_REG (Pmode, 0);
25996 output_asm_insn ("stw %1,0(%0)", xops);
25998 /* Test if TEST_ADDR == LAST_ADDR. */
25999 xops[1] = reg2;
26000 if (TARGET_64BIT)
26001 output_asm_insn ("cmpd 0,%0,%1", xops);
26002 else
26003 output_asm_insn ("cmpw 0,%0,%1", xops);
26005 /* Branch. */
26006 fputs ("\tbne 0,", asm_out_file);
26007 assemble_name_raw (asm_out_file, loop_lab);
26008 fputc ('\n', asm_out_file);
26010 return "";
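/* Illustrative output (an assumption about one 64-bit instance): with a
   4096-byte PROBE_INTERVAL the loop printed above reads

   .LPSRL0:
       addi r12,r12,-4096   # TEST_ADDR -= PROBE_INTERVAL
       stw r0,0(r12)        # probe
       cmpd 0,r12,r0        # reached LAST_ADDR yet?
       bne 0,.LPSRL0

   where r12 and r0 are the registers set up by
   rs6000_emit_probe_stack_range.  */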
26013 /* This function is called when rs6000_frame_related is processing
26014 SETs within a PARALLEL, and returns whether the REGNO save ought to
26015 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
26016 for out-of-line register save functions, store multiple, and the
26017 Darwin world_save. They may contain registers that don't really
26018 need saving. */
26020 static bool
26021 interesting_frame_related_regno (unsigned int regno)
26023 /* Saves apparently of r0 are actually saving LR. It doesn't make
26024 sense to substitute the regno here to test save_reg_p (LR_REGNO).
26025 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
26026 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
26027 as frame related. */
26028 if (regno == 0)
26029 return true;
26030 /* If we see CR2 then we are here on a Darwin world save. Saves of
26031 CR2 signify the whole CR is being saved. This is a long-standing
26032 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
26033 that CR needs to be saved. */
26034 if (regno == CR2_REGNO)
26035 return true;
26036 /* Omit frame info for any user-defined global regs. If frame info
26037 is supplied for them, frame unwinding will restore a user reg.
26038 Also omit frame info for any reg we don't need to save, as that
26039 bloats frame info and can cause problems with shrink wrapping.
26040 Since global regs won't be seen as needing to be saved, both of
26041 these conditions are covered by save_reg_p. */
26042 return save_reg_p (regno);
26045 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
26046 addresses, not offsets.
26048 REG2 contains the backchain that must be stored into *sp at each allocation.
26050 This is subtly different from the Ada probing above in that it tries hard
26051 to prevent attacks that jump the stack guard. Thus, it is never allowed
26052 to allocate more than PROBE_INTERVAL bytes of stack space without a
26053 suitable probe. */
26055 static const char *
26056 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
26058 static int labelno = 0;
26059 char loop_lab[32];
26060 rtx xops[3];
26062 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
26064 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
26066 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
26068 /* This allocates and probes. */
26069 xops[0] = reg1;
26070 xops[1] = reg2;
26071 xops[2] = GEN_INT (-probe_interval);
26072 if (TARGET_64BIT)
26073 output_asm_insn ("stdu %1,%2(%0)", xops);
26074 else
26075 output_asm_insn ("stwu %1,%2(%0)", xops);
26077 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
26078 xops[0] = reg1;
26079 xops[1] = reg3;
26080 if (TARGET_64BIT)
26081 output_asm_insn ("cmpd 0,%0,%1", xops);
26082 else
26083 output_asm_insn ("cmpw 0,%0,%1", xops);
26085 fputs ("\tbne 0,", asm_out_file);
26086 assemble_name_raw (asm_out_file, loop_lab);
26087 fputc ('\n', asm_out_file);
26089 return "";
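/* Illustrative output (an assumption): with a 4096-byte probe interval
   and no caller-supplied copy register, the 64-bit loop above prints as

   .LPSRL1:
       stdu r0,-4096(r1)    # allocate one interval, store the backchain
       cmpd 0,r1,r12        # stop when sp reaches the end address
       bne 0,.LPSRL1

   so every page of new stack is written as it is allocated.  */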
26092 /* Wrapper around the output_probe_stack_range routines. */
26093 const char *
26094 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
26096 if (flag_stack_clash_protection)
26097 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
26098 else
26099 return output_probe_stack_range_1 (reg1, reg3);
26102 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
26103 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
26104 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
26105 deduce these equivalences by itself so it wasn't necessary to hold
26106 its hand so much. Don't be tempted to always supply d2_f_d_e with
26107 the actual CFA register, i.e. r31 when we are using a hard frame
26108 pointer. That fails when saving regs off r1, and sched moves the
26109 r31 setup past the reg saves. */
26111 static rtx_insn *
26112 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
26113 rtx reg2, rtx repl2)
26115 rtx repl;
26117 if (REGNO (reg) == STACK_POINTER_REGNUM)
26119 gcc_checking_assert (val == 0);
26120 repl = NULL_RTX;
26122 else
26123 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26124 GEN_INT (val));
26126 rtx pat = PATTERN (insn);
26127 if (!repl && !reg2)
26129 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26130 if (GET_CODE (pat) == PARALLEL)
26131 for (int i = 0; i < XVECLEN (pat, 0); i++)
26132 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26134 rtx set = XVECEXP (pat, 0, i);
26136 if (!REG_P (SET_SRC (set))
26137 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26138 RTX_FRAME_RELATED_P (set) = 1;
26140 RTX_FRAME_RELATED_P (insn) = 1;
26141 return insn;
26144 /* We expect that 'pat' is either a SET or a PARALLEL containing
26145 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26146 are important so they all have to be marked RTX_FRAME_RELATED_P.
26147 Call simplify_replace_rtx on the SETs rather than the whole insn
26148 so as to leave the other stuff alone (for example USE of r12). */
26150 set_used_flags (pat);
26151 if (GET_CODE (pat) == SET)
26153 if (repl)
26154 pat = simplify_replace_rtx (pat, reg, repl);
26155 if (reg2)
26156 pat = simplify_replace_rtx (pat, reg2, repl2);
26158 else if (GET_CODE (pat) == PARALLEL)
26160 pat = shallow_copy_rtx (pat);
26161 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26163 for (int i = 0; i < XVECLEN (pat, 0); i++)
26164 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26166 rtx set = XVECEXP (pat, 0, i);
26168 if (repl)
26169 set = simplify_replace_rtx (set, reg, repl);
26170 if (reg2)
26171 set = simplify_replace_rtx (set, reg2, repl2);
26172 XVECEXP (pat, 0, i) = set;
26174 if (!REG_P (SET_SRC (set))
26175 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26176 RTX_FRAME_RELATED_P (set) = 1;
26179 else
26180 gcc_unreachable ();
26182 RTX_FRAME_RELATED_P (insn) = 1;
26183 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26185 return insn;
26188 /* Returns an insn that has a vrsave set operation with the
26189 appropriate CLOBBERs. */
26191 static rtx
26192 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26194 int nclobs, i;
26195 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26196 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26198 clobs[0]
26199 = gen_rtx_SET (vrsave,
26200 gen_rtx_UNSPEC_VOLATILE (SImode,
26201 gen_rtvec (2, reg, vrsave),
26202 UNSPECV_SET_VRSAVE));
26204 nclobs = 1;
26206 /* We need to clobber the registers in the mask so the scheduler
26207 does not move sets to VRSAVE before sets of AltiVec registers.
26209 However, if the function receives nonlocal gotos, reload will set
26210 all call saved registers live. We will end up with:
26212 (set (reg 999) (mem))
26213 (parallel [ (set (reg vrsave) (unspec blah))
26214 (clobber (reg 999))])
26216 The clobber will cause the store into reg 999 to be dead, and
26217 flow will attempt to delete an epilogue insn. In this case, we
26218 need an unspec use/set of the register. */
26220 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26221 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26223 if (!epiloguep || call_used_regs [i])
26224 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
26225 gen_rtx_REG (V4SImode, i));
26226 else
26228 rtx reg = gen_rtx_REG (V4SImode, i);
26230 clobs[nclobs++]
26231 = gen_rtx_SET (reg,
26232 gen_rtx_UNSPEC (V4SImode,
26233 gen_rtvec (1, reg), 27));
26237 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26239 for (i = 0; i < nclobs; ++i)
26240 XVECEXP (insn, 0, i) = clobs[i];
26242 return insn;
26245 static rtx
26246 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26248 rtx addr, mem;
26250 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26251 mem = gen_frame_mem (GET_MODE (reg), addr);
26252 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26255 static rtx
26256 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26258 return gen_frame_set (reg, frame_reg, offset, false);
26261 static rtx
26262 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26264 return gen_frame_set (reg, frame_reg, offset, true);
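/* Example (a hypothetical call, not in the original source):
   gen_frame_store (gen_rtx_REG (DImode, 31), sp_reg, 16) builds roughly

       (set (mem/c:DI (plus:DI (reg:DI 1) (const_int 16)))
            (reg:DI 31))

   a frame store of r31 at sp+16; gen_frame_load swaps the source and
   destination.  */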
26267 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26268 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26270 static rtx_insn *
26271 emit_frame_save (rtx frame_reg, machine_mode mode,
26272 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26274 rtx reg;
26276 /* Some cases that need register indexed addressing. */
26277 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26278 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26280 reg = gen_rtx_REG (mode, regno);
26281 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26282 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26283 NULL_RTX, NULL_RTX);
26286 /* Emit an offset memory reference suitable for a frame store, while
26287 converting to a valid addressing mode. */
26289 static rtx
26290 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26292 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26295 #ifndef TARGET_FIX_AND_CONTINUE
26296 #define TARGET_FIX_AND_CONTINUE 0
26297 #endif
26299 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
26300 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26301 #define LAST_SAVRES_REGISTER 31
26302 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26304 enum {
26305 SAVRES_LR = 0x1,
26306 SAVRES_SAVE = 0x2,
26307 SAVRES_REG = 0x0c,
26308 SAVRES_GPR = 0,
26309 SAVRES_FPR = 4,
26310 SAVRES_VR = 8
26313 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26315 /* Temporary holding space for an out-of-line register save/restore
26316 routine name. */
26317 static char savres_routine_name[30];
26319 /* Return the name for an out-of-line register save/restore routine.
26320 We are saving/restoring GPRs if GPR is true. */
26322 static char *
26323 rs6000_savres_routine_name (int regno, int sel)
26325 const char *prefix = "";
26326 const char *suffix = "";
26328 /* Different targets are supposed to define
26329 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26330 routine name could be defined with:
26332 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26334 This is a nice idea in theory, but in reality, things are
26335 complicated in several ways:
26337 - ELF targets have save/restore routines for GPRs.
26339 - PPC64 ELF targets have routines for save/restore of GPRs that
26340 differ in what they do with the link register, so having a set
26341 prefix doesn't work. (We only use one of the save routines at
26342 the moment, though.)
26344 - PPC32 elf targets have "exit" versions of the restore routines
26345 that restore the link register and can save some extra space.
26346 These require an extra suffix. (There are also "tail" versions
26347 of the restore routines and "GOT" versions of the save routines,
26348 but we don't generate those at present. Same problems apply,
26349 though.)
26351 We deal with all this by synthesizing our own prefix/suffix and
26352 using that for the simple sprintf call shown above. */
26353 if (DEFAULT_ABI == ABI_V4)
26355 if (TARGET_64BIT)
26356 goto aix_names;
26358 if ((sel & SAVRES_REG) == SAVRES_GPR)
26359 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26360 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26361 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26362 else if ((sel & SAVRES_REG) == SAVRES_VR)
26363 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26364 else
26365 abort ();
26367 if ((sel & SAVRES_LR))
26368 suffix = "_x";
26370 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26372 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26373 /* No out-of-line save/restore routines for GPRs on AIX. */
26374 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26375 #endif
26377 aix_names:
26378 if ((sel & SAVRES_REG) == SAVRES_GPR)
26379 prefix = ((sel & SAVRES_SAVE)
26380 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26381 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26382 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26384 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26385 if ((sel & SAVRES_LR))
26386 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26387 else
26388 #endif
26390 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26391 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26394 else if ((sel & SAVRES_REG) == SAVRES_VR)
26395 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26396 else
26397 abort ();
26400 if (DEFAULT_ABI == ABI_DARWIN)
26402 /* The Darwin approach is (slightly) different, in order to be
26403 compatible with code generated by the system toolchain. There is a
26404 single symbol for the start of save sequence, and the code here
26405 embeds an offset into that code on the basis of the first register
26406 to be saved. */
26407 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26408 if ((sel & SAVRES_REG) == SAVRES_GPR)
26409 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26410 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26411 (regno - 13) * 4, prefix, regno);
26412 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26413 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26414 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26415 else if ((sel & SAVRES_REG) == SAVRES_VR)
26416 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26417 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26418 else
26419 abort ();
26421 else
26422 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26424 return savres_routine_name;
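/* Example results (assumed, for illustration): on 32-bit SVR4, saving
   GPRs from r29 up while also saving LR yields "_savegpr_29_x"; on
   64-bit ELF the equivalent out-of-line save is "_savegpr0_29", and the
   variant that leaves LR alone is "_savegpr1_29".  */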
26427 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26428 We are saving/restoring GPRs if GPR is true. */
26430 static rtx
26431 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26433 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26434 ? info->first_gp_reg_save
26435 : (sel & SAVRES_REG) == SAVRES_FPR
26436 ? info->first_fp_reg_save - 32
26437 : (sel & SAVRES_REG) == SAVRES_VR
26438 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26439 : -1);
26440 rtx sym;
26441 int select = sel;
26443 /* Don't generate bogus routine names. */
26444 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26445 && regno <= LAST_SAVRES_REGISTER
26446 && select >= 0 && select <= 12);
26448 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26450 if (sym == NULL)
26452 char *name;
26454 name = rs6000_savres_routine_name (regno, sel);
26456 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26457 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26458 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26461 return sym;
26464 /* Emit a sequence of insns, including a stack tie if needed, for
26465 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26466 reset the stack pointer, but move the base of the frame into
26467 reg UPDT_REGNO for use by out-of-line register restore routines. */
26469 static rtx
26470 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26471 unsigned updt_regno)
26473 /* If there is nothing to do, don't do anything. */
26474 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26475 return NULL_RTX;
26477 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26479 /* This blockage is needed so that sched doesn't decide to move
26480 the sp change before the register restores. */
26481 if (DEFAULT_ABI == ABI_V4)
26482 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26483 GEN_INT (frame_off)));
26485 /* If we are restoring registers out-of-line, we will be using the
26486 "exit" variants of the restore routines, which will reset the
26487 stack for us. But we do need to point updt_reg into the
26488 right place for those routines. */
26489 if (frame_off != 0)
26490 return emit_insn (gen_add3_insn (updt_reg_rtx,
26491 frame_reg_rtx, GEN_INT (frame_off)));
26492 else
26493 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26495 return NULL_RTX;
26498 /* Return the register number used as a pointer by out-of-line
26499 save/restore functions. */
26501 static inline unsigned
26502 ptr_regno_for_savres (int sel)
26504 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26505 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26506 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26509 /* Construct a parallel rtx describing the effect of a call to an
26510 out-of-line register save/restore routine, and emit the insn
26511 or jump_insn as appropriate. */
26513 static rtx_insn *
26514 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26515 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26516 machine_mode reg_mode, int sel)
26518 int i;
26519 int offset, start_reg, end_reg, n_regs, use_reg;
26520 int reg_size = GET_MODE_SIZE (reg_mode);
26521 rtx sym;
26522 rtvec p;
26523 rtx par;
26524 rtx_insn *insn;
26526 offset = 0;
26527 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26528 ? info->first_gp_reg_save
26529 : (sel & SAVRES_REG) == SAVRES_FPR
26530 ? info->first_fp_reg_save
26531 : (sel & SAVRES_REG) == SAVRES_VR
26532 ? info->first_altivec_reg_save
26533 : -1);
26534 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26535 ? 32
26536 : (sel & SAVRES_REG) == SAVRES_FPR
26537 ? 64
26538 : (sel & SAVRES_REG) == SAVRES_VR
26539 ? LAST_ALTIVEC_REGNO + 1
26540 : -1);
26541 n_regs = end_reg - start_reg;
26542 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26543 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26544 + n_regs);
26546 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26547 RTVEC_ELT (p, offset++) = ret_rtx;
26549 RTVEC_ELT (p, offset++)
26550 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26552 sym = rs6000_savres_routine_sym (info, sel);
26553 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26555 use_reg = ptr_regno_for_savres (sel);
26556 if ((sel & SAVRES_REG) == SAVRES_VR)
26558 /* Vector regs are saved/restored using [reg+reg] addressing. */
26559 RTVEC_ELT (p, offset++)
26560 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26561 RTVEC_ELT (p, offset++)
26562 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26564 else
26565 RTVEC_ELT (p, offset++)
26566 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26568 for (i = 0; i < end_reg - start_reg; i++)
26569 RTVEC_ELT (p, i + offset)
26570 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26571 frame_reg_rtx, save_area_offset + reg_size * i,
26572 (sel & SAVRES_SAVE) != 0);
26574 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26575 RTVEC_ELT (p, i + offset)
26576 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26578 par = gen_rtx_PARALLEL (VOIDmode, p);
26580 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26582 insn = emit_jump_insn (par);
26583 JUMP_LABEL (insn) = ret_rtx;
26585 else
26586 insn = emit_insn (par);
26587 return insn;
26590 /* Emit prologue code to store CR fields that need to be saved into REG. This
26591 function should only be called when moving the non-volatile CRs to REG, it
26592 is not a general purpose routine to move the entire set of CRs to REG.
26593 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26594 volatile CRs. */
26596 static void
26597 rs6000_emit_prologue_move_from_cr (rtx reg)
26599 /* Only the ELFv2 ABI allows saving just the selected CR fields. */
26600 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26602 int i, cr_reg[8], count = 0;
26604 /* Collect CR fields that must be saved. */
26605 for (i = 0; i < 8; i++)
26606 if (save_reg_p (CR0_REGNO + i))
26607 cr_reg[count++] = i;
26609 /* If it's just a single one, use mfcrf. */
26610 if (count == 1)
26612 rtvec p = rtvec_alloc (1);
26613 rtvec r = rtvec_alloc (2);
26614 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26615 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26616 RTVEC_ELT (p, 0)
26617 = gen_rtx_SET (reg,
26618 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26620 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26621 return;
26624 /* ??? It might be better to handle count == 2 / 3 cases here
26625 as well, using logical operations to combine the values. */
26628 emit_insn (gen_prologue_movesi_from_cr (reg));
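/* Illustrative sketch (an assumption): if CR2 is the only field that
   needs saving, the single-field path above emits the one-field move
   "mfcr reg,0x20" (mfocrf form, mask 1 << (7 - 2)); otherwise the
   generic prologue_movesi_from_cr pattern moves the whole CR.  */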
26631 /* Return whether the split-stack arg pointer (r12) is used. */
26633 static bool
26634 split_stack_arg_pointer_used_p (void)
26636 /* If the pseudo holding the arg pointer is no longer a pseudo,
26637 then the arg pointer is used. */
26638 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26639 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26640 || (REGNO (cfun->machine->split_stack_arg_pointer)
26641 < FIRST_PSEUDO_REGISTER)))
26642 return true;
26644 /* Unfortunately we also need to do some code scanning, since
26645 r12 may have been substituted for the pseudo. */
26646 rtx_insn *insn;
26647 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26648 FOR_BB_INSNS (bb, insn)
26649 if (NONDEBUG_INSN_P (insn))
26651 /* A call destroys r12. */
26652 if (CALL_P (insn))
26653 return false;
26655 df_ref use;
26656 FOR_EACH_INSN_USE (use, insn)
26658 rtx x = DF_REF_REG (use);
26659 if (REG_P (x) && REGNO (x) == 12)
26660 return true;
26662 df_ref def;
26663 FOR_EACH_INSN_DEF (def, insn)
26665 rtx x = DF_REF_REG (def);
26666 if (REG_P (x) && REGNO (x) == 12)
26667 return false;
26670 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26673 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26675 static bool
26676 rs6000_global_entry_point_needed_p (void)
26678 /* Only needed for the ELFv2 ABI. */
26679 if (DEFAULT_ABI != ABI_ELFv2)
26680 return false;
26682 /* With -msingle-pic-base, we assume the whole program shares the same
26683 TOC, so no global entry point prologues are needed anywhere. */
26684 if (TARGET_SINGLE_PIC_BASE)
26685 return false;
26687 /* Ensure we have a global entry point for thunks. ??? We could
26688 avoid that if the target routine doesn't need a global entry point,
26689 but we do not know whether this is the case at this point. */
26690 if (cfun->is_thunk)
26691 return true;
26693 /* For regular functions, rs6000_emit_prologue sets this flag if the
26694 routine ever uses the TOC pointer. */
26695 return cfun->machine->r2_setup_needed;
26698 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26699 static sbitmap
26700 rs6000_get_separate_components (void)
26702 rs6000_stack_t *info = rs6000_stack_info ();
26704 if (WORLD_SAVE_P (info))
26705 return NULL;
26707 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26708 && !(info->savres_strategy & REST_MULTIPLE));
26710 /* Component 0 is the save/restore of LR (done via GPR0).
26711 Component 2 is the save of the TOC (GPR2).
26712 Components 13..31 are the save/restore of GPR13..GPR31.
26713 Components 46..63 are the save/restore of FPR14..FPR31. */
26715 cfun->machine->n_components = 64;
26717 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26718 bitmap_clear (components);
26720 int reg_size = TARGET_32BIT ? 4 : 8;
26721 int fp_reg_size = 8;
26723 /* The GPRs we need saved to the frame. */
26724 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26725 && (info->savres_strategy & REST_INLINE_GPRS))
26727 int offset = info->gp_save_offset;
26728 if (info->push_p)
26729 offset += info->total_size;
26731 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26733 if (IN_RANGE (offset, -0x8000, 0x7fff)
26734 && save_reg_p (regno))
26735 bitmap_set_bit (components, regno);
26737 offset += reg_size;
26741 /* Don't mess with the hard frame pointer. */
26742 if (frame_pointer_needed)
26743 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26745 /* Don't mess with the fixed TOC register. */
26746 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26747 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26748 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26749 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26751 /* The FPRs we need saved to the frame. */
26752 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26753 && (info->savres_strategy & REST_INLINE_FPRS))
26755 int offset = info->fp_save_offset;
26756 if (info->push_p)
26757 offset += info->total_size;
26759 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26761 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26762 bitmap_set_bit (components, regno);
26764 offset += fp_reg_size;
26768 /* Optimize LR save and restore if we can. This is component 0. Any
26769 out-of-line register save/restore routines need LR. */
26770 if (info->lr_save_p
26771 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26772 && (info->savres_strategy & SAVE_INLINE_GPRS)
26773 && (info->savres_strategy & REST_INLINE_GPRS)
26774 && (info->savres_strategy & SAVE_INLINE_FPRS)
26775 && (info->savres_strategy & REST_INLINE_FPRS)
26776 && (info->savres_strategy & SAVE_INLINE_VRS)
26777 && (info->savres_strategy & REST_INLINE_VRS))
26779 int offset = info->lr_save_offset;
26780 if (info->push_p)
26781 offset += info->total_size;
26782 if (IN_RANGE (offset, -0x8000, 0x7fff))
26783 bitmap_set_bit (components, 0);
26786 /* Optimize saving the TOC. This is component 2. */
26787 if (cfun->machine->save_toc_in_prologue)
26788 bitmap_set_bit (components, 2);
26790 return components;
26793 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26794 static sbitmap
26795 rs6000_components_for_bb (basic_block bb)
26797 rs6000_stack_t *info = rs6000_stack_info ();
26799 bitmap in = DF_LIVE_IN (bb);
26800 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26801 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26803 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26804 bitmap_clear (components);
26806 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26808 /* GPRs. */
26809 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26810 if (bitmap_bit_p (in, regno)
26811 || bitmap_bit_p (gen, regno)
26812 || bitmap_bit_p (kill, regno))
26813 bitmap_set_bit (components, regno);
26815 /* FPRs. */
26816 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26817 if (bitmap_bit_p (in, regno)
26818 || bitmap_bit_p (gen, regno)
26819 || bitmap_bit_p (kill, regno))
26820 bitmap_set_bit (components, regno);
26822 /* The link register. */
26823 if (bitmap_bit_p (in, LR_REGNO)
26824 || bitmap_bit_p (gen, LR_REGNO)
26825 || bitmap_bit_p (kill, LR_REGNO))
26826 bitmap_set_bit (components, 0);
26828 /* The TOC save. */
26829 if (bitmap_bit_p (in, TOC_REGNUM)
26830 || bitmap_bit_p (gen, TOC_REGNUM)
26831 || bitmap_bit_p (kill, TOC_REGNUM))
26832 bitmap_set_bit (components, 2);
26834 return components;
26837 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26838 static void
26839 rs6000_disqualify_components (sbitmap components, edge e,
26840 sbitmap edge_components, bool /*is_prologue*/)
26842 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26843 live where we want to place that code. */
26844 if (bitmap_bit_p (edge_components, 0)
26845 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26847 if (dump_file)
26848 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26849 "on entry to bb %d\n", e->dest->index);
26850 bitmap_clear_bit (components, 0);
26854 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26855 static void
26856 rs6000_emit_prologue_components (sbitmap components)
26858 rs6000_stack_t *info = rs6000_stack_info ();
26859 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26860 ? HARD_FRAME_POINTER_REGNUM
26861 : STACK_POINTER_REGNUM);
26863 machine_mode reg_mode = Pmode;
26864 int reg_size = TARGET_32BIT ? 4 : 8;
26865 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26866 ? DFmode : SFmode;
26867 int fp_reg_size = 8;
26869 /* Prologue for LR. */
26870 if (bitmap_bit_p (components, 0))
26872 rtx reg = gen_rtx_REG (reg_mode, 0);
26873 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26874 RTX_FRAME_RELATED_P (insn) = 1;
26875 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26877 int offset = info->lr_save_offset;
26878 if (info->push_p)
26879 offset += info->total_size;
26881 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26882 RTX_FRAME_RELATED_P (insn) = 1;
26883 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26884 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26885 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26888 /* Prologue for TOC. */
26889 if (bitmap_bit_p (components, 2))
26891 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26892 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26893 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26896 /* Prologue for the GPRs. */
26897 int offset = info->gp_save_offset;
26898 if (info->push_p)
26899 offset += info->total_size;
26901 for (int i = info->first_gp_reg_save; i < 32; i++)
26903 if (bitmap_bit_p (components, i))
26905 rtx reg = gen_rtx_REG (reg_mode, i);
26906 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26907 RTX_FRAME_RELATED_P (insn) = 1;
26908 rtx set = copy_rtx (single_set (insn));
26909 add_reg_note (insn, REG_CFA_OFFSET, set);
26912 offset += reg_size;
26915 /* Prologue for the FPRs. */
26916 offset = info->fp_save_offset;
26917 if (info->push_p)
26918 offset += info->total_size;
26920 for (int i = info->first_fp_reg_save; i < 64; i++)
26922 if (bitmap_bit_p (components, i))
26924 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26925 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26926 RTX_FRAME_RELATED_P (insn) = 1;
26927 rtx set = copy_rtx (single_set (insn));
26928 add_reg_note (insn, REG_CFA_OFFSET, set);
26931 offset += fp_reg_size;
26935 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26936 static void
26937 rs6000_emit_epilogue_components (sbitmap components)
26939 rs6000_stack_t *info = rs6000_stack_info ();
26940 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26941 ? HARD_FRAME_POINTER_REGNUM
26942 : STACK_POINTER_REGNUM);
26944 machine_mode reg_mode = Pmode;
26945 int reg_size = TARGET_32BIT ? 4 : 8;
26947 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26948 ? DFmode : SFmode;
26949 int fp_reg_size = 8;
26951 /* Epilogue for the FPRs. */
26952 int offset = info->fp_save_offset;
26953 if (info->push_p)
26954 offset += info->total_size;
26956 for (int i = info->first_fp_reg_save; i < 64; i++)
26958 if (bitmap_bit_p (components, i))
26960 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26961 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26962 RTX_FRAME_RELATED_P (insn) = 1;
26963 add_reg_note (insn, REG_CFA_RESTORE, reg);
26966 offset += fp_reg_size;
26969 /* Epilogue for the GPRs. */
26970 offset = info->gp_save_offset;
26971 if (info->push_p)
26972 offset += info->total_size;
26974 for (int i = info->first_gp_reg_save; i < 32; i++)
26976 if (bitmap_bit_p (components, i))
26978 rtx reg = gen_rtx_REG (reg_mode, i);
26979 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26980 RTX_FRAME_RELATED_P (insn) = 1;
26981 add_reg_note (insn, REG_CFA_RESTORE, reg);
26984 offset += reg_size;
26987 /* Epilogue for LR. */
26988 if (bitmap_bit_p (components, 0))
26990 int offset = info->lr_save_offset;
26991 if (info->push_p)
26992 offset += info->total_size;
26994 rtx reg = gen_rtx_REG (reg_mode, 0);
26995 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26997 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26998 insn = emit_move_insn (lr, reg);
26999 RTX_FRAME_RELATED_P (insn) = 1;
27000 add_reg_note (insn, REG_CFA_RESTORE, lr);
27004 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
27005 static void
27006 rs6000_set_handled_components (sbitmap components)
27008 rs6000_stack_t *info = rs6000_stack_info ();
27010 for (int i = info->first_gp_reg_save; i < 32; i++)
27011 if (bitmap_bit_p (components, i))
27012 cfun->machine->gpr_is_wrapped_separately[i] = true;
27014 for (int i = info->first_fp_reg_save; i < 64; i++)
27015 if (bitmap_bit_p (components, i))
27016 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
27018 if (bitmap_bit_p (components, 0))
27019 cfun->machine->lr_is_wrapped_separately = true;
27021 if (bitmap_bit_p (components, 2))
27022 cfun->machine->toc_is_wrapped_separately = true;
27025 /* VRSAVE is a bit vector representing which AltiVec registers
27026 are used. The OS uses this to determine which vector
27027 registers to save on a context switch. We need to save
27028 VRSAVE on the stack frame, add whatever AltiVec registers we
27029 used in this function, and do the corresponding magic in the
27030 epilogue. */
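/* A minimal sketch of the sequence this helper emits (register and
   offset are illustrative; SAVE_REGNO varies by ABI and context):
	mfvrsave 11          # copy VRSAVE into a GPR
	stw 11,OFF(1)        # save the caller's value in the frame
	oris/ori 11,11,MASK  # fold in the AltiVec regs we use
	mtvrsave 11          # install the new mask  */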
27031 static void
27032 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
27033 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
27035 /* Get VRSAVE into a GPR. */
27036 rtx reg = gen_rtx_REG (SImode, save_regno);
27037 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27038 if (TARGET_MACHO)
27039 emit_insn (gen_get_vrsave_internal (reg));
27040 else
27041 emit_insn (gen_rtx_SET (reg, vrsave));
27043 /* Save VRSAVE. */
27044 int offset = info->vrsave_save_offset + frame_off;
27045 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
27047 /* Include the registers in the mask. */
27048 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
27050 emit_insn (generate_set_vrsave (reg, info, 0));
27053 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
27054 called, it left the arg pointer to the old stack in r29. Otherwise, the
27055 arg pointer is the top of the current frame. */
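/* Roughly, when the frame was pushed (labels and registers are
   illustrative; cr7 was set by __morestack):
	mr 12,1      # or addi 12,FRAME_REG,OFF after the adjust
	bge 7,0f     # cr7 geu: __morestack was not called
	mr 12,29     # old-stack arg pointer left by __morestack
   0:  */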
27056 static void
27057 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
27058 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
27060 cfun->machine->split_stack_argp_used = true;
27062 if (sp_adjust)
27064 rtx r12 = gen_rtx_REG (Pmode, 12);
27065 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27066 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
27067 emit_insn_before (set_r12, sp_adjust);
27069 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
27071 rtx r12 = gen_rtx_REG (Pmode, 12);
27072 if (frame_off == 0)
27073 emit_move_insn (r12, frame_reg_rtx);
27074 else
27075 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27078 if (info->push_p)
27080 rtx r12 = gen_rtx_REG (Pmode, 12);
27081 rtx r29 = gen_rtx_REG (Pmode, 29);
27082 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27083 rtx not_more = gen_label_rtx ();
27084 rtx jump;
27086 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27087 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27088 gen_rtx_LABEL_REF (VOIDmode, not_more),
27089 pc_rtx);
27090 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27091 JUMP_LABEL (jump) = not_more;
27092 LABEL_NUSES (not_more) += 1;
27093 emit_move_insn (r12, r29);
27094 emit_label (not_more);
27098 /* Emit function prologue as insns. */
27100 void
27101 rs6000_emit_prologue (void)
27103 rs6000_stack_t *info = rs6000_stack_info ();
27104 machine_mode reg_mode = Pmode;
27105 int reg_size = TARGET_32BIT ? 4 : 8;
27106 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27107 ? DFmode : SFmode;
27108 int fp_reg_size = 8;
27109 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27110 rtx frame_reg_rtx = sp_reg_rtx;
27111 unsigned int cr_save_regno;
27112 rtx cr_save_rtx = NULL_RTX;
27113 rtx_insn *insn;
27114 int strategy;
27115 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27116 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27117 && call_used_regs[STATIC_CHAIN_REGNUM]);
27118 int using_split_stack = (flag_split_stack
27119 && (lookup_attribute ("no_split_stack",
27120 DECL_ATTRIBUTES (cfun->decl))
27121 == NULL));
27123 /* Offset to top of frame for frame_reg and sp respectively. */
27124 HOST_WIDE_INT frame_off = 0;
27125 HOST_WIDE_INT sp_off = 0;
27126 /* sp_adjust is the stack adjusting instruction, tracked so that the
27127 insn setting up the split-stack arg pointer can be emitted just
27128 prior to it, when r12 is not used here for other purposes. */
27129 rtx_insn *sp_adjust = 0;
27131 #if CHECKING_P
27132 /* Track and check usage of r0, r11, r12. */
27133 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27134 #define START_USE(R) do \
27136 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27137 reg_inuse |= 1 << (R); \
27138 } while (0)
27139 #define END_USE(R) do \
27141 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27142 reg_inuse &= ~(1 << (R)); \
27143 } while (0)
27144 #define NOT_INUSE(R) do \
27146 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27147 } while (0)
27148 #else
27149 #define START_USE(R) do {} while (0)
27150 #define END_USE(R) do {} while (0)
27151 #define NOT_INUSE(R) do {} while (0)
27152 #endif
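/* Example usage, as seen below when LR is staged through r0:
     START_USE (0); ... emit the store ...; END_USE (0);
   Overlapping claims on r0/r11/r12 then fire these assertions in
   checking-enabled builds.  */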
27154 if (DEFAULT_ABI == ABI_ELFv2
27155 && !TARGET_SINGLE_PIC_BASE)
27157 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27159 /* With -mminimal-toc we may generate an extra use of r2 below. */
27160 if (TARGET_TOC && TARGET_MINIMAL_TOC
27161 && !constant_pool_empty_p ())
27162 cfun->machine->r2_setup_needed = true;
27166 if (flag_stack_usage_info)
27167 current_function_static_stack_size = info->total_size;
27169 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27171 HOST_WIDE_INT size = info->total_size;
27173 if (crtl->is_leaf && !cfun->calls_alloca)
27175 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27176 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27177 size - get_stack_check_protect ());
27179 else if (size > 0)
27180 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27183 if (TARGET_FIX_AND_CONTINUE)
27185 /* gdb on darwin arranges to forward a function from the old
27186 address by modifying the first 5 instructions of the function
27187 to branch to the overriding function. This is necessary to
27188 permit function pointers that point to the old function to
27189 actually forward to the new function. */
27190 emit_insn (gen_nop ());
27191 emit_insn (gen_nop ());
27192 emit_insn (gen_nop ());
27193 emit_insn (gen_nop ());
27194 emit_insn (gen_nop ());
27197 /* Handle world saves specially here. */
27198 if (WORLD_SAVE_P (info))
27200 int i, j, sz;
27201 rtx treg;
27202 rtvec p;
27203 rtx reg0;
27205 /* save_world expects lr in r0. */
27206 reg0 = gen_rtx_REG (Pmode, 0);
27207 if (info->lr_save_p)
27209 insn = emit_move_insn (reg0,
27210 gen_rtx_REG (Pmode, LR_REGNO));
27211 RTX_FRAME_RELATED_P (insn) = 1;
27214 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27215 assumptions about the offsets of various bits of the stack
27216 frame. */
27217 gcc_assert (info->gp_save_offset == -220
27218 && info->fp_save_offset == -144
27219 && info->lr_save_offset == 8
27220 && info->cr_save_offset == 4
27221 && info->push_p
27222 && info->lr_save_p
27223 && (!crtl->calls_eh_return
27224 || info->ehrd_offset == -432)
27225 && info->vrsave_save_offset == -224
27226 && info->altivec_save_offset == -416);
27228 treg = gen_rtx_REG (SImode, 11);
27229 emit_move_insn (treg, GEN_INT (-info->total_size));
27231 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27232 in R11. It also clobbers R12, so beware! */
27234 /* Preserve CR2 for save_world prologues. */
27235 sz = 5;
27236 sz += 32 - info->first_gp_reg_save;
27237 sz += 64 - info->first_fp_reg_save;
27238 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27239 p = rtvec_alloc (sz);
27240 j = 0;
27241 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
27242 gen_rtx_REG (SImode,
27243 LR_REGNO));
27244 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27245 gen_rtx_SYMBOL_REF (Pmode,
27246 "*save_world"));
27247 /* We do floats first so that the instruction pattern matches
27248 properly. */
27249 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27250 RTVEC_ELT (p, j++)
27251 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27252 ? DFmode : SFmode,
27253 info->first_fp_reg_save + i),
27254 frame_reg_rtx,
27255 info->fp_save_offset + frame_off + 8 * i);
27256 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27257 RTVEC_ELT (p, j++)
27258 = gen_frame_store (gen_rtx_REG (V4SImode,
27259 info->first_altivec_reg_save + i),
27260 frame_reg_rtx,
27261 info->altivec_save_offset + frame_off + 16 * i);
27262 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27263 RTVEC_ELT (p, j++)
27264 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27265 frame_reg_rtx,
27266 info->gp_save_offset + frame_off + reg_size * i);
27268 /* CR register traditionally saved as CR2. */
27269 RTVEC_ELT (p, j++)
27270 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27271 frame_reg_rtx, info->cr_save_offset + frame_off);
27272 /* Explain about use of R0. */
27273 if (info->lr_save_p)
27274 RTVEC_ELT (p, j++)
27275 = gen_frame_store (reg0,
27276 frame_reg_rtx, info->lr_save_offset + frame_off);
27277 /* Explain what happens to the stack pointer. */
27279 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27280 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27283 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27284 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27285 treg, GEN_INT (-info->total_size));
27286 sp_off = frame_off = info->total_size;
27289 strategy = info->savres_strategy;
27291 /* For V.4, update stack before we do any saving and set back pointer. */
27292 if (! WORLD_SAVE_P (info)
27293 && info->push_p
27294 && (DEFAULT_ABI == ABI_V4
27295 || crtl->calls_eh_return))
27297 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27298 || !(strategy & SAVE_INLINE_GPRS)
27299 || !(strategy & SAVE_INLINE_VRS));
27300 int ptr_regno = -1;
27301 rtx ptr_reg = NULL_RTX;
27302 int ptr_off = 0;
27304 if (info->total_size < 32767)
27305 frame_off = info->total_size;
27306 else if (need_r11)
27307 ptr_regno = 11;
27308 else if (info->cr_save_p
27309 || info->lr_save_p
27310 || info->first_fp_reg_save < 64
27311 || info->first_gp_reg_save < 32
27312 || info->altivec_size != 0
27313 || info->vrsave_size != 0
27314 || crtl->calls_eh_return)
27315 ptr_regno = 12;
27316 else
27318 /* The prologue won't be saving any regs, so there is no need
27319 to set up a frame register to access any frame save area.
27320 We also won't be using frame_off anywhere below, but set
27321 the correct value anyway to protect against future
27322 changes to this function. */
27323 frame_off = info->total_size;
27325 if (ptr_regno != -1)
27327 /* Set up the frame offset to that needed by the first
27328 out-of-line save function. */
27329 START_USE (ptr_regno);
27330 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27331 frame_reg_rtx = ptr_reg;
27332 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27333 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27334 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27335 ptr_off = info->gp_save_offset + info->gp_size;
27336 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27337 ptr_off = info->altivec_save_offset + info->altivec_size;
27338 frame_off = -ptr_off;
27340 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27341 ptr_reg, ptr_off);
27342 if (REGNO (frame_reg_rtx) == 12)
27343 sp_adjust = 0;
27344 sp_off = info->total_size;
27345 if (frame_reg_rtx != sp_reg_rtx)
27346 rs6000_emit_stack_tie (frame_reg_rtx, false);
27349 /* If we use the link register, get it into r0. */
27350 if (!WORLD_SAVE_P (info) && info->lr_save_p
27351 && !cfun->machine->lr_is_wrapped_separately)
27353 rtx addr, reg, mem;
27355 reg = gen_rtx_REG (Pmode, 0);
27356 START_USE (0);
27357 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27358 RTX_FRAME_RELATED_P (insn) = 1;
27360 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27361 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27363 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27364 GEN_INT (info->lr_save_offset + frame_off));
27365 mem = gen_rtx_MEM (Pmode, addr);
27366 /* This should not be in rs6000_sr_alias_set, because of
27367 __builtin_return_address. */
27369 insn = emit_move_insn (mem, reg);
27370 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27371 NULL_RTX, NULL_RTX);
27372 END_USE (0);
27376 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27377 r12 will be needed by the out-of-line gpr save. */
27378 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27379 && !(strategy & (SAVE_INLINE_GPRS
27380 | SAVE_NOINLINE_GPRS_SAVES_LR))
27381 ? 11 : 12);
27382 if (!WORLD_SAVE_P (info)
27383 && info->cr_save_p
27384 && REGNO (frame_reg_rtx) != cr_save_regno
27385 && !(using_static_chain_p && cr_save_regno == 11)
27386 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27388 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27389 START_USE (cr_save_regno);
27390 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27393 /* Do any required saving of FPRs. If only one or two to save, do
27394 it ourselves. Otherwise, call an out-of-line save function. */
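/* The inline path below emits one "stfd fN,OFF(1)" per live FPR; the
   out-of-line path instead branches to a shared save routine whose
   name varies by ABI (see rs6000_savres_routine_name).  */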
27395 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27397 int offset = info->fp_save_offset + frame_off;
27398 for (int i = info->first_fp_reg_save; i < 64; i++)
27400 if (save_reg_p (i)
27401 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27402 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27403 sp_off - frame_off);
27405 offset += fp_reg_size;
27408 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27410 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27411 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27412 unsigned ptr_regno = ptr_regno_for_savres (sel);
27413 rtx ptr_reg = frame_reg_rtx;
27415 if (REGNO (frame_reg_rtx) == ptr_regno)
27416 gcc_checking_assert (frame_off == 0);
27417 else
27419 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27420 NOT_INUSE (ptr_regno);
27421 emit_insn (gen_add3_insn (ptr_reg,
27422 frame_reg_rtx, GEN_INT (frame_off)));
27424 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27425 info->fp_save_offset,
27426 info->lr_save_offset,
27427 DFmode, sel);
27428 rs6000_frame_related (insn, ptr_reg, sp_off,
27429 NULL_RTX, NULL_RTX);
27430 if (lr)
27431 END_USE (0);
27434 /* Save GPRs. This is done as a PARALLEL if we are using
27435 the store-multiple instructions. */
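/* For example, with first_gp_reg_save == 26 on 32-bit, the PARALLEL
   built below matches a single "stmw 26,OFF(1)" storing r26..r31.  */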
27436 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27438 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27439 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27440 unsigned ptr_regno = ptr_regno_for_savres (sel);
27441 rtx ptr_reg = frame_reg_rtx;
27442 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27443 int end_save = info->gp_save_offset + info->gp_size;
27444 int ptr_off;
27446 if (ptr_regno == 12)
27447 sp_adjust = 0;
27448 if (!ptr_set_up)
27449 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27451 /* Need to adjust r11 (r12) if we saved any FPRs. */
27452 if (end_save + frame_off != 0)
27454 rtx offset = GEN_INT (end_save + frame_off);
27456 if (ptr_set_up)
27457 frame_off = -end_save;
27458 else
27459 NOT_INUSE (ptr_regno);
27460 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27462 else if (!ptr_set_up)
27464 NOT_INUSE (ptr_regno);
27465 emit_move_insn (ptr_reg, frame_reg_rtx);
27467 ptr_off = -end_save;
27468 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27469 info->gp_save_offset + ptr_off,
27470 info->lr_save_offset + ptr_off,
27471 reg_mode, sel);
27472 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27473 NULL_RTX, NULL_RTX);
27474 if (lr)
27475 END_USE (0);
27477 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27479 rtvec p;
27480 int i;
27481 p = rtvec_alloc (32 - info->first_gp_reg_save);
27482 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27483 RTVEC_ELT (p, i)
27484 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27485 frame_reg_rtx,
27486 info->gp_save_offset + frame_off + reg_size * i);
27487 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27488 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27489 NULL_RTX, NULL_RTX);
27491 else if (!WORLD_SAVE_P (info))
27493 int offset = info->gp_save_offset + frame_off;
27494 for (int i = info->first_gp_reg_save; i < 32; i++)
27496 if (save_reg_p (i)
27497 && !cfun->machine->gpr_is_wrapped_separately[i])
27498 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27499 sp_off - frame_off);
27501 offset += reg_size;
27505 if (crtl->calls_eh_return)
27507 unsigned int i;
27508 rtvec p;
27510 for (i = 0; ; ++i)
27512 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27513 if (regno == INVALID_REGNUM)
27514 break;
27517 p = rtvec_alloc (i);
27519 for (i = 0; ; ++i)
27521 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27522 if (regno == INVALID_REGNUM)
27523 break;
27525 rtx set
27526 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27527 sp_reg_rtx,
27528 info->ehrd_offset + sp_off + reg_size * (int) i);
27529 RTVEC_ELT (p, i) = set;
27530 RTX_FRAME_RELATED_P (set) = 1;
27533 insn = emit_insn (gen_blockage ());
27534 RTX_FRAME_RELATED_P (insn) = 1;
27535 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27539 /* In the AIX ABI we need to make sure r2 is really saved. */
27539 if (TARGET_AIX && crtl->calls_eh_return)
27541 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27542 rtx join_insn, note;
27543 rtx_insn *save_insn;
27544 long toc_restore_insn;
27546 tmp_reg = gen_rtx_REG (Pmode, 11);
27547 tmp_reg_si = gen_rtx_REG (SImode, 11);
27548 if (using_static_chain_p)
27550 START_USE (0);
27551 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27553 else
27554 START_USE (11);
27555 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27556 /* Peek at instruction to which this function returns. If it's
27557 restoring r2, then we know we've already saved r2. We can't
27558 unconditionally save r2 because the value we have will already
27559 be updated if we arrived at this function via a plt call or
27560 toc adjusting stub. */
27561 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27562 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27563 + RS6000_TOC_SAVE_SLOT);
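/* 0x80410000 is the image of "lwz 2,0(1)" and 0xE8410000 that of
   "ld 2,0(1)"; adding RS6000_TOC_SAVE_SLOT yields the expected TOC
   restore instruction, "lwz/ld 2,RS6000_TOC_SAVE_SLOT(1)".  */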
27564 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27565 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27566 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27567 validate_condition_mode (EQ, CCUNSmode);
27568 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27569 emit_insn (gen_rtx_SET (compare_result,
27570 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27571 toc_save_done = gen_label_rtx ();
27572 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27573 gen_rtx_EQ (VOIDmode, compare_result,
27574 const0_rtx),
27575 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27576 pc_rtx);
27577 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27578 JUMP_LABEL (jump) = toc_save_done;
27579 LABEL_NUSES (toc_save_done) += 1;
27581 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27582 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27583 sp_off - frame_off);
27585 emit_label (toc_save_done);
27587 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27588 have a CFG that has different saves along different paths.
27589 Move the note to a dummy blockage insn, which describes that
27590 R2 is unconditionally saved after the label. */
27591 /* ??? An alternate representation might be a special insn pattern
27592 containing both the branch and the store. That might give the
27593 code that minimizes the number of DW_CFA_advance opcodes greater
27594 freedom in placing the annotations. */
27595 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27596 if (note)
27597 remove_note (save_insn, note);
27598 else
27599 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27600 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27601 RTX_FRAME_RELATED_P (save_insn) = 0;
27603 join_insn = emit_insn (gen_blockage ());
27604 REG_NOTES (join_insn) = note;
27605 RTX_FRAME_RELATED_P (join_insn) = 1;
27607 if (using_static_chain_p)
27609 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27610 END_USE (0);
27612 else
27613 END_USE (11);
27616 /* Save CR if we use any that must be preserved. */
27617 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27619 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27620 GEN_INT (info->cr_save_offset + frame_off));
27621 rtx mem = gen_frame_mem (SImode, addr);
27623 /* If we didn't copy cr before, do so now using r0. */
27624 if (cr_save_rtx == NULL_RTX)
27626 START_USE (0);
27627 cr_save_rtx = gen_rtx_REG (SImode, 0);
27628 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27631 /* Saving CR requires a two-instruction sequence: one instruction
27632 to move the CR to a general-purpose register, and a second
27633 instruction that stores the GPR to memory.
27635 We do not emit any DWARF CFI records for the first of these,
27636 because we cannot properly represent the fact that CR is saved in
27637 a register. One reason is that we cannot express that multiple
27638 CR fields are saved; another reason is that on 64-bit, the size
27639 of the CR register in DWARF (4 bytes) differs from the size of
27640 a general-purpose register.
27642 This means if any intervening instruction were to clobber one of
27643 the call-saved CR fields, we'd have incorrect CFI. To prevent
27644 this from happening, we mark the store to memory as a use of
27645 those CR fields, which prevents any such instruction from being
27646 scheduled in between the two instructions. */
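/* So the emitted pair is roughly (scratch register illustrative):
	mfcr 12         # no CFI attached here
	stw 12,OFF(1)   # CFI here; USEs of the saved CR fields
			# (typically cr2..cr4) fence the pair  */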
27647 rtx crsave_v[9];
27648 int n_crsave = 0;
27649 int i;
27651 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27652 for (i = 0; i < 8; i++)
27653 if (save_reg_p (CR0_REGNO + i))
27654 crsave_v[n_crsave++]
27655 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27657 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27658 gen_rtvec_v (n_crsave, crsave_v)));
27659 END_USE (REGNO (cr_save_rtx));
27661 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27662 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27663 so we need to construct a frame expression manually. */
27664 RTX_FRAME_RELATED_P (insn) = 1;
27666 /* Update address to be stack-pointer relative, like
27667 rs6000_frame_related would do. */
27668 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27669 GEN_INT (info->cr_save_offset + sp_off));
27670 mem = gen_frame_mem (SImode, addr);
27672 if (DEFAULT_ABI == ABI_ELFv2)
27674 /* In the ELFv2 ABI we generate separate CFI records for each
27675 CR field that was actually saved. They all point to the
27676 same 32-bit stack slot. */
27677 rtx crframe[8];
27678 int n_crframe = 0;
27680 for (i = 0; i < 8; i++)
27681 if (save_reg_p (CR0_REGNO + i))
27683 crframe[n_crframe]
27684 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27686 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27687 n_crframe++;
27690 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27691 gen_rtx_PARALLEL (VOIDmode,
27692 gen_rtvec_v (n_crframe, crframe)));
27694 else
27696 /* In other ABIs, by convention, we use a single CR regnum to
27697 represent the fact that all call-saved CR fields are saved.
27698 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27699 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27700 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27704 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27705 *separate* slots if the routine calls __builtin_eh_return, so
27706 that they can be independently restored by the unwinder. */
27707 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27709 int i, cr_off = info->ehcr_offset;
27710 rtx crsave;
27712 /* ??? We might get better performance by using multiple mfocrf
27713 instructions. */
27714 crsave = gen_rtx_REG (SImode, 0);
27715 emit_insn (gen_prologue_movesi_from_cr (crsave));
27717 for (i = 0; i < 8; i++)
27718 if (!call_used_regs[CR0_REGNO + i])
27720 rtvec p = rtvec_alloc (2);
27721 RTVEC_ELT (p, 0)
27722 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27723 RTVEC_ELT (p, 1)
27724 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27726 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27728 RTX_FRAME_RELATED_P (insn) = 1;
27729 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27730 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27731 sp_reg_rtx, cr_off + sp_off));
27733 cr_off += reg_size;
27737 /* If we are emitting stack probes but allocating no stack, just
27738 note that in the dump file. */
27739 if (flag_stack_clash_protection
27740 && dump_file
27741 && !info->push_p)
27742 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27744 /* Update stack and set back pointer unless this is V.4,
27745 for which it was done previously. */
27746 if (!WORLD_SAVE_P (info) && info->push_p
27747 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27749 rtx ptr_reg = NULL;
27750 int ptr_off = 0;
27752 /* If saving altivec regs we need to be able to address all save
27753 locations using a 16-bit offset. */
27754 if ((strategy & SAVE_INLINE_VRS) == 0
27755 || (info->altivec_size != 0
27756 && (info->altivec_save_offset + info->altivec_size - 16
27757 + info->total_size - frame_off) > 32767)
27758 || (info->vrsave_size != 0
27759 && (info->vrsave_save_offset
27760 + info->total_size - frame_off) > 32767))
27762 int sel = SAVRES_SAVE | SAVRES_VR;
27763 unsigned ptr_regno = ptr_regno_for_savres (sel);
27765 if (using_static_chain_p
27766 && ptr_regno == STATIC_CHAIN_REGNUM)
27767 ptr_regno = 12;
27768 if (REGNO (frame_reg_rtx) != ptr_regno)
27769 START_USE (ptr_regno);
27770 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27771 frame_reg_rtx = ptr_reg;
27772 ptr_off = info->altivec_save_offset + info->altivec_size;
27773 frame_off = -ptr_off;
27775 else if (REGNO (frame_reg_rtx) == 1)
27776 frame_off = info->total_size;
27777 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27778 ptr_reg, ptr_off);
27779 if (REGNO (frame_reg_rtx) == 12)
27780 sp_adjust = 0;
27781 sp_off = info->total_size;
27782 if (frame_reg_rtx != sp_reg_rtx)
27783 rs6000_emit_stack_tie (frame_reg_rtx, false);
27786 /* Set frame pointer, if needed. */
27787 if (frame_pointer_needed)
27789 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27790 sp_reg_rtx);
27791 RTX_FRAME_RELATED_P (insn) = 1;
27794 /* Save AltiVec registers if needed. Save here because the red zone does
27795 not always include AltiVec registers. */
27796 if (!WORLD_SAVE_P (info)
27797 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27799 int end_save = info->altivec_save_offset + info->altivec_size;
27800 int ptr_off;
27801 /* Oddly, the vector save/restore functions point r0 at the end
27802 of the save area, then use r11 or r12 to load offsets for
27803 [reg+reg] addressing. */
27804 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27805 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27806 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27808 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27809 NOT_INUSE (0);
27810 if (scratch_regno == 12)
27811 sp_adjust = 0;
27812 if (end_save + frame_off != 0)
27814 rtx offset = GEN_INT (end_save + frame_off);
27816 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27818 else
27819 emit_move_insn (ptr_reg, frame_reg_rtx);
27821 ptr_off = -end_save;
27822 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27823 info->altivec_save_offset + ptr_off,
27824 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27825 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27826 NULL_RTX, NULL_RTX);
27827 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27829 /* The oddity mentioned above clobbered our frame reg. */
27830 emit_move_insn (frame_reg_rtx, ptr_reg);
27831 frame_off = ptr_off;
27834 else if (!WORLD_SAVE_P (info)
27835 && info->altivec_size != 0)
27837 int i;
27839 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27840 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27842 rtx areg, savereg, mem;
27843 HOST_WIDE_INT offset;
27845 offset = (info->altivec_save_offset + frame_off
27846 + 16 * (i - info->first_altivec_reg_save));
27848 savereg = gen_rtx_REG (V4SImode, i);
27850 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27852 mem = gen_frame_mem (V4SImode,
27853 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27854 GEN_INT (offset)));
27855 insn = emit_insn (gen_rtx_SET (mem, savereg));
27856 areg = NULL_RTX;
27858 else
27860 NOT_INUSE (0);
27861 areg = gen_rtx_REG (Pmode, 0);
27862 emit_move_insn (areg, GEN_INT (offset));
27864 /* AltiVec addressing mode is [reg+reg]. */
27865 mem = gen_frame_mem (V4SImode,
27866 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27868 /* Rather than emitting a generic move, force use of the stvx
27869 instruction, which we always want on ISA 2.07 (power8) systems.
27870 In particular we don't want xxpermdi/stxvd2x for little
27871 endian. */
27872 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27875 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27876 areg, GEN_INT (offset));
27880 /* Save VRSAVE, folding in whatever AltiVec registers we used in
27881 this function; see the comment above emit_vrsave_prologue. */
27887 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27889 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27890 be using r12 as frame_reg_rtx and r11 as the static chain
27891 pointer for nested functions. */
27892 int save_regno = 12;
27893 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27894 && !using_static_chain_p)
27895 save_regno = 11;
27896 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27898 save_regno = 11;
27899 if (using_static_chain_p)
27900 save_regno = 0;
27902 NOT_INUSE (save_regno);
27904 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27907 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27908 if (!TARGET_SINGLE_PIC_BASE
27909 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27910 && !constant_pool_empty_p ())
27911 || (DEFAULT_ABI == ABI_V4
27912 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27913 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27915 /* If emit_load_toc_table will use the link register, we need to save
27916 it. We use R12 for this purpose because emit_load_toc_table
27917 can use register 0. This allows us to use a plain 'blr' to return
27918 from the procedure more often. */
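/* Roughly: mflr 12; <TOC setup, which may clobber LR via a "bl">;
   mtlr 12, with a REG_CFA_RESTORE on the final move.  */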
27919 int save_LR_around_toc_setup = (TARGET_ELF
27920 && DEFAULT_ABI == ABI_V4
27921 && flag_pic
27922 && ! info->lr_save_p
27923 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27924 if (save_LR_around_toc_setup)
27926 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27927 rtx tmp = gen_rtx_REG (Pmode, 12);
27929 sp_adjust = 0;
27930 insn = emit_move_insn (tmp, lr);
27931 RTX_FRAME_RELATED_P (insn) = 1;
27933 rs6000_emit_load_toc_table (TRUE);
27935 insn = emit_move_insn (lr, tmp);
27936 add_reg_note (insn, REG_CFA_RESTORE, lr);
27937 RTX_FRAME_RELATED_P (insn) = 1;
27939 else
27940 rs6000_emit_load_toc_table (TRUE);
27943 #if TARGET_MACHO
27944 if (!TARGET_SINGLE_PIC_BASE
27945 && DEFAULT_ABI == ABI_DARWIN
27946 && flag_pic && crtl->uses_pic_offset_table)
27948 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27949 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27951 /* Save and restore LR locally around this call (in R0). */
27952 if (!info->lr_save_p)
27953 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27955 emit_insn (gen_load_macho_picbase (src));
27957 emit_move_insn (gen_rtx_REG (Pmode,
27958 RS6000_PIC_OFFSET_TABLE_REGNUM),
27959 lr);
27961 if (!info->lr_save_p)
27962 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27964 #endif
27966 /* If we need to, save the TOC register after doing the stack setup.
27967 Do not emit eh frame info for this save. The unwinder wants info,
27968 conceptually attached to instructions in this function, about
27969 register values in the caller of this function. This R2 may have
27970 already been changed from the value in the caller.
27971 We don't attempt to write accurate DWARF EH frame info for R2
27972 because code emitted by gcc for a (non-pointer) function call
27973 doesn't save and restore R2. Instead, R2 is managed out-of-line
27974 by a linker generated plt call stub when the function resides in
27975 a shared library. This behavior is costly to describe in DWARF,
27976 both in terms of the size of DWARF info and the time taken in the
27977 unwinder to interpret it. R2 changes, apart from the
27978 calls_eh_return case earlier in this function, are handled by
27979 linux-unwind.h frob_update_context. */
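/* e.g. "std 2,24(1)" under ELFv2 (the slot is 40 for ELFv1 and 20 for
   32-bit AIX), deliberately emitted without any CFI.  */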
27980 if (rs6000_save_toc_in_prologue_p ()
27981 && !cfun->machine->toc_is_wrapped_separately)
27983 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27984 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27987 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27988 if (using_split_stack && split_stack_arg_pointer_used_p ())
27989 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27992 /* Output .extern statements for the save/restore routines we use. */
27994 static void
27995 rs6000_output_savres_externs (FILE *file)
27997 rs6000_stack_t *info = rs6000_stack_info ();
27999 if (TARGET_DEBUG_STACK)
28000 debug_stack_info (info);
28002 /* Write .extern for any function we will call to save and restore
28003 fp values. */
28004 if (info->first_fp_reg_save < 64
28005 && !TARGET_MACHO
28006 && !TARGET_ELF)
28008 char *name;
28009 int regno = info->first_fp_reg_save - 32;
28011 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
28013 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
28014 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
28015 name = rs6000_savres_routine_name (regno, sel);
28016 fprintf (file, "\t.extern %s\n", name);
28018 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
28020 bool lr = (info->savres_strategy
28021 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28022 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28023 name = rs6000_savres_routine_name (regno, sel);
28024 fprintf (file, "\t.extern %s\n", name);
28029 /* Write function prologue. */
28031 static void
28032 rs6000_output_function_prologue (FILE *file)
28034 if (!cfun->is_thunk)
28035 rs6000_output_savres_externs (file);
28037 /* ELFv2 ABI r2 setup code and local entry point. This must follow
28038 immediately after the global entry point label. */
28039 if (rs6000_global_entry_point_needed_p ())
28041 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28043 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
28045 if (TARGET_CMODEL != CMODEL_LARGE)
28047 /* In the small and medium code models, we assume the TOC is less
28048 than 2 GB away from the text section, so it can be computed via the
28049 following two-instruction sequence. */
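/* The global entry stub emitted here looks like, e.g.:
   0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l
   relying on the ELFv2 guarantee that r12 holds the global entry
   address on entry.  */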
28050 char buf[256];
28052 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28053 fprintf (file, "0:\taddis 2,12,.TOC.-");
28054 assemble_name (file, buf);
28055 fprintf (file, "@ha\n");
28056 fprintf (file, "\taddi 2,2,.TOC.-");
28057 assemble_name (file, buf);
28058 fprintf (file, "@l\n");
28060 else
28062 /* In the large code model, we allow arbitrary offsets between the
28063 TOC and the text section, so we have to load the offset from
28064 memory. The data field is emitted directly before the global
28065 entry point in rs6000_elf_declare_function_name. */
28066 char buf[256];
28068 #ifdef HAVE_AS_ENTRY_MARKERS
28069 /* If supported by the linker, emit a marker relocation. If the
28070 total code size of the final executable or shared library
28071 happens to fit into 2 GB after all, the linker will replace
28072 this code sequence with the sequence for the small or medium
28073 code model. */
28074 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
28075 #endif
28076 fprintf (file, "\tld 2,");
28077 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
28078 assemble_name (file, buf);
28079 fprintf (file, "-");
28080 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28081 assemble_name (file, buf);
28082 fprintf (file, "(12)\n");
28083 fprintf (file, "\tadd 2,2,12\n");
28086 fputs ("\t.localentry\t", file);
28087 assemble_name (file, name);
28088 fputs (",.-", file);
28089 assemble_name (file, name);
28090 fputs ("\n", file);
28093 /* Output -mprofile-kernel code. This needs to be done here instead of
28094 in output_function_profile since it must go after the ELFv2 ABI
28095 local entry point. */
28096 if (TARGET_PROFILE_KERNEL && crtl->profile)
28098 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
28099 gcc_assert (!TARGET_32BIT);
28101 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
28103 /* In the ELFv2 ABI we have no compiler stack word. It must be
28104 the responsibility of _mcount to preserve the static chain
28105 register if required. */
28106 if (DEFAULT_ABI != ABI_ELFv2
28107 && cfun->static_chain_decl != NULL)
28109 asm_fprintf (file, "\tstd %s,24(%s)\n",
28110 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28111 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28112 asm_fprintf (file, "\tld %s,24(%s)\n",
28113 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28115 else
28116 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28119 rs6000_pic_labelno++;
28122 /* -mprofile-kernel code calls mcount before the function prologue,
28123 so a profiled leaf function should stay a leaf function. */
28124 static bool
28125 rs6000_keep_leaf_when_profiled ()
28127 return TARGET_PROFILE_KERNEL;
28130 /* Non-zero if vmx regs are restored before the frame pop, zero if
28131 we restore after the pop when possible. */
28132 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
28134 /* Restoring cr is a two-step process: loading a reg from the frame
28135 save, then moving the reg to cr. For ABI_V4 we must let the
28136 unwinder know that the stack location is no longer valid at or
28137 before the stack deallocation, but we can't emit a cfa_restore for
28138 cr at the stack deallocation like we do for other registers.
28139 The trouble is that it is possible for the move to cr to be
28140 scheduled after the stack deallocation. So say exactly where cr
28141 is located on each of the two insns. */
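/* i.e. roughly (scratch register and mask illustrative):
	lwz 12,OFF(1)   <- REG_CFA_REGISTER: cr now lives in r12
	mtcrf 0x38,12   <- REG_CFA_RESTORE for the restored CR fields  */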
28143 static rtx
28144 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28146 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28147 rtx reg = gen_rtx_REG (SImode, regno);
28148 rtx_insn *insn = emit_move_insn (reg, mem);
28150 if (!exit_func && DEFAULT_ABI == ABI_V4)
28152 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28153 rtx set = gen_rtx_SET (reg, cr);
28155 add_reg_note (insn, REG_CFA_REGISTER, set);
28156 RTX_FRAME_RELATED_P (insn) = 1;
28158 return reg;
28161 /* Reload CR from REG. */
28163 static void
28164 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28166 int count = 0;
28167 int i;
28169 if (using_mfcr_multiple)
28171 for (i = 0; i < 8; i++)
28172 if (save_reg_p (CR0_REGNO + i))
28173 count++;
28174 gcc_assert (count);
28177 if (using_mfcr_multiple && count > 1)
28179 rtx_insn *insn;
28180 rtvec p;
28181 int ndx;
28183 p = rtvec_alloc (count);
28185 ndx = 0;
28186 for (i = 0; i < 8; i++)
28187 if (save_reg_p (CR0_REGNO + i))
28189 rtvec r = rtvec_alloc (2);
28190 RTVEC_ELT (r, 0) = reg;
28191 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28192 RTVEC_ELT (p, ndx) =
28193 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28194 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28195 ndx++;
28197 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28198 gcc_assert (ndx == count);
28200 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28201 CR field separately. */
28202 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28204 for (i = 0; i < 8; i++)
28205 if (save_reg_p (CR0_REGNO + i))
28206 add_reg_note (insn, REG_CFA_RESTORE,
28207 gen_rtx_REG (SImode, CR0_REGNO + i));
28209 RTX_FRAME_RELATED_P (insn) = 1;
28212 else
28213 for (i = 0; i < 8; i++)
28214 if (save_reg_p (CR0_REGNO + i))
28216 rtx insn = emit_insn (gen_movsi_to_cr_one
28217 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28219 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28220 CR field separately, attached to the insn that in fact
28221 restores this particular CR field. */
28222 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28224 add_reg_note (insn, REG_CFA_RESTORE,
28225 gen_rtx_REG (SImode, CR0_REGNO + i));
28227 RTX_FRAME_RELATED_P (insn) = 1;
28231 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28232 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28233 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28235 rtx_insn *insn = get_last_insn ();
28236 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28238 add_reg_note (insn, REG_CFA_RESTORE, cr);
28239 RTX_FRAME_RELATED_P (insn) = 1;
28243 /* Like cr, the move to lr instruction can be scheduled after the
28244 stack deallocation, but unlike cr, its stack frame save is still
28245 valid. So we only need to emit the cfa_restore on the correct
28246 instruction. */
28248 static void
28249 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28251 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28252 rtx reg = gen_rtx_REG (Pmode, regno);
28254 emit_move_insn (reg, mem);
28257 static void
28258 restore_saved_lr (int regno, bool exit_func)
28260 rtx reg = gen_rtx_REG (Pmode, regno);
28261 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28262 rtx_insn *insn = emit_move_insn (lr, reg);
28264 if (!exit_func && flag_shrink_wrap)
28266 add_reg_note (insn, REG_CFA_RESTORE, lr);
28267 RTX_FRAME_RELATED_P (insn) = 1;
28271 static rtx
28272 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28274 if (DEFAULT_ABI == ABI_ELFv2)
28276 int i;
28277 for (i = 0; i < 8; i++)
28278 if (save_reg_p (CR0_REGNO + i))
28280 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28281 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28282 cfa_restores);
28285 else if (info->cr_save_p)
28286 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28287 gen_rtx_REG (SImode, CR2_REGNO),
28288 cfa_restores);
28290 if (info->lr_save_p)
28291 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28292 gen_rtx_REG (Pmode, LR_REGNO),
28293 cfa_restores);
28294 return cfa_restores;
28297 /* Return true if OFFSET from stack pointer can be clobbered by signals.
28298 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
28299 below the stack pointer not clobbered by signals. */
28301 static inline bool
28302 offset_below_red_zone_p (HOST_WIDE_INT offset)
28304 return offset < (DEFAULT_ABI == ABI_V4
28305 ? 0
28306 : TARGET_32BIT ? -220 : -288);
28309 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28311 static void
28312 emit_cfa_restores (rtx cfa_restores)
28314 rtx_insn *insn = get_last_insn ();
28315 rtx *loc = &REG_NOTES (insn);
28317 while (*loc)
28318 loc = &XEXP (*loc, 1);
28319 *loc = cfa_restores;
28320 RTX_FRAME_RELATED_P (insn) = 1;
28323 /* Emit function epilogue as insns. */
28325 void
28326 rs6000_emit_epilogue (int sibcall)
28328 rs6000_stack_t *info;
28329 int restoring_GPRs_inline;
28330 int restoring_FPRs_inline;
28331 int using_load_multiple;
28332 int using_mtcr_multiple;
28333 int use_backchain_to_restore_sp;
28334 int restore_lr;
28335 int strategy;
28336 HOST_WIDE_INT frame_off = 0;
28337 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28338 rtx frame_reg_rtx = sp_reg_rtx;
28339 rtx cfa_restores = NULL_RTX;
28340 rtx insn;
28341 rtx cr_save_reg = NULL_RTX;
28342 machine_mode reg_mode = Pmode;
28343 int reg_size = TARGET_32BIT ? 4 : 8;
28344 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
28345 ? DFmode : SFmode;
28346 int fp_reg_size = 8;
28347 int i;
28348 bool exit_func;
28349 unsigned ptr_regno;
28351 info = rs6000_stack_info ();
28353 strategy = info->savres_strategy;
28354 using_load_multiple = strategy & REST_MULTIPLE;
28355 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28356 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28357 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28358 || rs6000_tune == PROCESSOR_PPC603
28359 || rs6000_tune == PROCESSOR_PPC750
28360 || optimize_size);
28361 /* Restore via the backchain when we have a large frame, since this
28362 is more efficient than an addis, addi pair. The second condition
28363 here will not trigger at the moment; we don't actually need a
28364 frame pointer for alloca, but the generic parts of the compiler
28365 give us one anyway. */
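/* i.e. a single "ld 11,0(1)" ("lwz" on 32-bit) recovers the old stack
   pointer from the backchain word at offset 0 of the frame.  */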
28366 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28367 ? info->lr_save_offset
28368 : 0) > 32767
28369 || (cfun->calls_alloca
28370 && !frame_pointer_needed));
28371 restore_lr = (info->lr_save_p
28372 && (restoring_FPRs_inline
28373 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28374 && (restoring_GPRs_inline
28375 || info->first_fp_reg_save < 64)
28376 && !cfun->machine->lr_is_wrapped_separately);
28379 if (WORLD_SAVE_P (info))
28381 int i, j;
28382 char rname[30];
28383 const char *alloc_rname;
28384 rtvec p;
28386 /* eh_rest_world_r10 will return to the location saved in the LR
28387 stack slot (which is not likely to be our caller).
28388 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28389 rest_world is similar, except any R10 parameter is ignored.
28390 The exception-handling stuff that was here in 2.95 is no
28391 longer necessary. */
28393 p = rtvec_alloc (9
28394 + 32 - info->first_gp_reg_save
28395 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28396 + 63 + 1 - info->first_fp_reg_save);
28398 strcpy (rname, ((crtl->calls_eh_return) ?
28399 "*eh_rest_world_r10" : "*rest_world"));
28400 alloc_rname = ggc_strdup (rname);
28402 j = 0;
28403 RTVEC_ELT (p, j++) = ret_rtx;
28404 RTVEC_ELT (p, j++)
28405 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28406 /* The instruction pattern requires a clobber here;
28407 it is shared with the restVEC helper. */
28408 RTVEC_ELT (p, j++)
28409 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28412 /* CR register traditionally saved as CR2. */
28413 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28414 RTVEC_ELT (p, j++)
28415 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28416 if (flag_shrink_wrap)
28418 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28419 gen_rtx_REG (Pmode, LR_REGNO),
28420 cfa_restores);
28421 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28425 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28427 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28428 RTVEC_ELT (p, j++)
28429 = gen_frame_load (reg,
28430 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28431 if (flag_shrink_wrap
28432 && save_reg_p (info->first_gp_reg_save + i))
28433 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28435 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28437 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28438 RTVEC_ELT (p, j++)
28439 = gen_frame_load (reg,
28440 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28441 if (flag_shrink_wrap
28442 && save_reg_p (info->first_altivec_reg_save + i))
28443 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28445 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28447 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28448 ? DFmode : SFmode),
28449 info->first_fp_reg_save + i);
28450 RTVEC_ELT (p, j++)
28451 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28452 if (flag_shrink_wrap
28453 && save_reg_p (info->first_fp_reg_save + i))
28454 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28456 RTVEC_ELT (p, j++)
28457 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28458 RTVEC_ELT (p, j++)
28459 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28460 RTVEC_ELT (p, j++)
28461 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28462 RTVEC_ELT (p, j++)
28463 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28464 RTVEC_ELT (p, j++)
28465 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28466 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28468 if (flag_shrink_wrap)
28470 REG_NOTES (insn) = cfa_restores;
28471 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28472 RTX_FRAME_RELATED_P (insn) = 1;
28474 return;
28477 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28478 if (info->push_p)
28479 frame_off = info->total_size;
28481 /* Restore AltiVec registers if we must do so before adjusting the
28482 stack. */
28483 if (info->altivec_size != 0
28484 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28485 || (DEFAULT_ABI != ABI_V4
28486 && offset_below_red_zone_p (info->altivec_save_offset))))
28488 int i;
28489 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28491 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28492 if (use_backchain_to_restore_sp)
28494 int frame_regno = 11;
28496 if ((strategy & REST_INLINE_VRS) == 0)
28498 /* Of r11 and r12, select the one not clobbered by an
28499 out-of-line restore function for the frame register. */
28500 frame_regno = 11 + 12 - scratch_regno;
28502 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28503 emit_move_insn (frame_reg_rtx,
28504 gen_rtx_MEM (Pmode, sp_reg_rtx));
28505 frame_off = 0;
28507 else if (frame_pointer_needed)
28508 frame_reg_rtx = hard_frame_pointer_rtx;
28510 if ((strategy & REST_INLINE_VRS) == 0)
28512 int end_save = info->altivec_save_offset + info->altivec_size;
28513 int ptr_off;
28514 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28515 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28517 if (end_save + frame_off != 0)
28519 rtx offset = GEN_INT (end_save + frame_off);
28521 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28523 else
28524 emit_move_insn (ptr_reg, frame_reg_rtx);
28526 ptr_off = -end_save;
28527 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28528 info->altivec_save_offset + ptr_off,
28529 0, V4SImode, SAVRES_VR);
28531 else
28533 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28534 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28536 rtx addr, areg, mem, insn;
28537 rtx reg = gen_rtx_REG (V4SImode, i);
28538 HOST_WIDE_INT offset
28539 = (info->altivec_save_offset + frame_off
28540 + 16 * (i - info->first_altivec_reg_save));
28542 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28544 mem = gen_frame_mem (V4SImode,
28545 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28546 GEN_INT (offset)));
28547 insn = gen_rtx_SET (reg, mem);
28549 else
28551 areg = gen_rtx_REG (Pmode, 0);
28552 emit_move_insn (areg, GEN_INT (offset));
28554 /* AltiVec addressing mode is [reg+reg]. */
28555 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28556 mem = gen_frame_mem (V4SImode, addr);
28558 /* Rather than emitting a generic move, force use of the
28559 lvx instruction, which we always want. In particular we
28560 don't want lxvd2x/xxpermdi for little endian. */
28561 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28564 (void) emit_insn (insn);
28568 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28569 if (((strategy & REST_INLINE_VRS) == 0
28570 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28571 && (flag_shrink_wrap
28572 || (offset_below_red_zone_p
28573 (info->altivec_save_offset
28574 + 16 * (i - info->first_altivec_reg_save))))
28575 && save_reg_p (i))
28577 rtx reg = gen_rtx_REG (V4SImode, i);
28578 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28582 /* Restore VRSAVE if we must do so before adjusting the stack. */
28583 if (info->vrsave_size != 0
28584 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28585 || (DEFAULT_ABI != ABI_V4
28586 && offset_below_red_zone_p (info->vrsave_save_offset))))
28588 rtx reg;
28590 if (frame_reg_rtx == sp_reg_rtx)
28592 if (use_backchain_to_restore_sp)
28594 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28595 emit_move_insn (frame_reg_rtx,
28596 gen_rtx_MEM (Pmode, sp_reg_rtx));
28597 frame_off = 0;
28599 else if (frame_pointer_needed)
28600 frame_reg_rtx = hard_frame_pointer_rtx;
28603 reg = gen_rtx_REG (SImode, 12);
28604 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28605 info->vrsave_save_offset + frame_off));
28607 emit_insn (generate_set_vrsave (reg, info, 1));
28610 insn = NULL_RTX;
28611 /* If we have a large stack frame, restore the old stack pointer
28612 using the backchain. */
28613 if (use_backchain_to_restore_sp)
28615 if (frame_reg_rtx == sp_reg_rtx)
28617 /* Under V.4, don't reset the stack pointer until after we're done
28618 loading the saved registers. */
28619 if (DEFAULT_ABI == ABI_V4)
28620 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28622 insn = emit_move_insn (frame_reg_rtx,
28623 gen_rtx_MEM (Pmode, sp_reg_rtx));
28624 frame_off = 0;
28626 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28627 && DEFAULT_ABI == ABI_V4)
28628 /* frame_reg_rtx has been set up by the altivec restore. */
28630 else
28632 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28633 frame_reg_rtx = sp_reg_rtx;
28636 /* If we have a frame pointer, we can restore the old stack pointer
28637 from it. */
28638 else if (frame_pointer_needed)
28640 frame_reg_rtx = sp_reg_rtx;
28641 if (DEFAULT_ABI == ABI_V4)
28642 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28643 /* Prevent reordering memory accesses against stack pointer restore. */
28644 else if (cfun->calls_alloca
28645 || offset_below_red_zone_p (-info->total_size))
28646 rs6000_emit_stack_tie (frame_reg_rtx, true);
28648 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28649 GEN_INT (info->total_size)));
28650 frame_off = 0;
28652 else if (info->push_p
28653 && DEFAULT_ABI != ABI_V4
28654 && !crtl->calls_eh_return)
28656 /* Prevent reordering memory accesses against stack pointer restore. */
28657 if (cfun->calls_alloca
28658 || offset_below_red_zone_p (-info->total_size))
28659 rs6000_emit_stack_tie (frame_reg_rtx, false);
28660 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28661 GEN_INT (info->total_size)));
28662 frame_off = 0;
28664 if (insn && frame_reg_rtx == sp_reg_rtx)
28666 if (cfa_restores)
28668 REG_NOTES (insn) = cfa_restores;
28669 cfa_restores = NULL_RTX;
28671 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28672 RTX_FRAME_RELATED_P (insn) = 1;
28675 /* Restore AltiVec registers if we have not done so already. */
28676 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28677 && info->altivec_size != 0
28678 && (DEFAULT_ABI == ABI_V4
28679 || !offset_below_red_zone_p (info->altivec_save_offset)))
28681 int i;
28683 if ((strategy & REST_INLINE_VRS) == 0)
28685 int end_save = info->altivec_save_offset + info->altivec_size;
28686 int ptr_off;
28687 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28688 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28689 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28691 if (end_save + frame_off != 0)
28693 rtx offset = GEN_INT (end_save + frame_off);
28695 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28697 else
28698 emit_move_insn (ptr_reg, frame_reg_rtx);
28700 ptr_off = -end_save;
28701 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28702 info->altivec_save_offset + ptr_off,
28703 0, V4SImode, SAVRES_VR);
28704 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28706 /* Frame reg was clobbered by out-of-line save. Restore it
28707 from ptr_reg, and if we are calling an out-of-line gpr or
28708 fpr restore, set up the correct pointer and offset. */
28709 unsigned newptr_regno = 1;
28710 if (!restoring_GPRs_inline)
28712 bool lr = info->gp_save_offset + info->gp_size == 0;
28713 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28714 newptr_regno = ptr_regno_for_savres (sel);
28715 end_save = info->gp_save_offset + info->gp_size;
28717 else if (!restoring_FPRs_inline)
28719 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28720 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28721 newptr_regno = ptr_regno_for_savres (sel);
28722 end_save = info->fp_save_offset + info->fp_size;
28725 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28726 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28728 if (end_save + ptr_off != 0)
28730 rtx offset = GEN_INT (end_save + ptr_off);
28732 frame_off = -end_save;
28733 if (TARGET_32BIT)
28734 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28735 ptr_reg, offset));
28736 else
28737 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28738 ptr_reg, offset));
28740 else
28742 frame_off = ptr_off;
28743 emit_move_insn (frame_reg_rtx, ptr_reg);
28747 else
28749 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28750 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28752 rtx addr, areg, mem, insn;
28753 rtx reg = gen_rtx_REG (V4SImode, i);
28754 HOST_WIDE_INT offset
28755 = (info->altivec_save_offset + frame_off
28756 + 16 * (i - info->first_altivec_reg_save));
28758 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28760 mem = gen_frame_mem (V4SImode,
28761 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28762 GEN_INT (offset)));
28763 insn = gen_rtx_SET (reg, mem);
28765 else
28767 areg = gen_rtx_REG (Pmode, 0);
28768 emit_move_insn (areg, GEN_INT (offset));
28770 /* AltiVec addressing mode is [reg+reg]. */
28771 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28772 mem = gen_frame_mem (V4SImode, addr);
28774 /* Rather than emitting a generic move, force use of the
28775 lvx instruction, which we always want. In particular we
28776 don't want lxvd2x/xxpermdi for little endian. */
28777 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28780 (void) emit_insn (insn);
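/* Illustrative only: for one vector register the [reg+reg] path
   above amounts to a pair along the lines of

	li r0,OFFSET
	lvx vN,rFRAME,r0

   where OFFSET, vN and rFRAME are placeholders for the computed
   save-slot offset, the AltiVec register being restored, and the
   current frame register; large offsets need a longer constant
   load than li.  */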
28784 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28785 if (((strategy & REST_INLINE_VRS) == 0
28786 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28787 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28788 && save_reg_p (i))
28790 rtx reg = gen_rtx_REG (V4SImode, i);
28791 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28795 /* Restore VRSAVE if we have not done so already. */
28796 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28797 && info->vrsave_size != 0
28798 && (DEFAULT_ABI == ABI_V4
28799 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28801 rtx reg;
28803 reg = gen_rtx_REG (SImode, 12);
28804 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28805 info->vrsave_save_offset + frame_off));
28807 emit_insn (generate_set_vrsave (reg, info, 1));
28810 /* If we exit by an out-of-line restore function on ABI_V4 then that
28811 function will deallocate the stack, so we don't need to worry
28812 about the unwinder restoring cr from an invalid stack frame
28813 location. */
28814 exit_func = (!restoring_FPRs_inline
28815 || (!restoring_GPRs_inline
28816 && info->first_fp_reg_save == 64));
28818 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28819 *separate* slots if the routine calls __builtin_eh_return, so
28820 that they can be independently restored by the unwinder. */
28821 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28823 int i, cr_off = info->ehcr_offset;
28825 for (i = 0; i < 8; i++)
28826 if (!call_used_regs[CR0_REGNO + i])
28828 rtx reg = gen_rtx_REG (SImode, 0);
28829 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28830 cr_off + frame_off));
28832 insn = emit_insn (gen_movsi_to_cr_one
28833 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28835 if (!exit_func && flag_shrink_wrap)
28837 add_reg_note (insn, REG_CFA_RESTORE,
28838 gen_rtx_REG (SImode, CR0_REGNO + i));
28840 RTX_FRAME_RELATED_P (insn) = 1;
28843 cr_off += reg_size;
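/* Illustrative only: each call-saved CR field comes back as
   roughly

	lwz r0,EHCR_OFF(r1)
	mtcrf MASK,r0

   where EHCR_OFF is a placeholder for the field's slot in the
   separate save area and MASK selects the single field (e.g. 0x20
   for CR2); the frame register need not be r1.  */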
28847 /* Get the old lr if we saved it. If we are restoring registers
28848 out-of-line, then the out-of-line routines can do this for us. */
28849 if (restore_lr && restoring_GPRs_inline)
28850 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28852 /* Get the old cr if we saved it. */
28853 if (info->cr_save_p)
28855 unsigned cr_save_regno = 12;
28857 if (!restoring_GPRs_inline)
28859 /* Ensure we don't use the register used by the out-of-line
28860 gpr register restore below. */
28861 bool lr = info->gp_save_offset + info->gp_size == 0;
28862 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28863 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28865 if (gpr_ptr_regno == 12)
28866 cr_save_regno = 11;
28867 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28869 else if (REGNO (frame_reg_rtx) == 12)
28870 cr_save_regno = 11;
28872 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28873 info->cr_save_offset + frame_off,
28874 exit_func);
28877 /* Set LR here to try to overlap restores below. */
28878 if (restore_lr && restoring_GPRs_inline)
28879 restore_saved_lr (0, exit_func);
28881 /* Load exception handler data registers, if needed. */
28882 if (crtl->calls_eh_return)
28884 unsigned int i, regno;
28886 if (TARGET_AIX)
28888 rtx reg = gen_rtx_REG (reg_mode, 2);
28889 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28890 frame_off + RS6000_TOC_SAVE_SLOT));
28893 for (i = 0; ; ++i)
28895 rtx mem;
28897 regno = EH_RETURN_DATA_REGNO (i);
28898 if (regno == INVALID_REGNUM)
28899 break;
28901 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28902 info->ehrd_offset + frame_off
28903 + reg_size * (int) i);
28905 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28909 /* Restore GPRs. This is done as a PARALLEL if we are using
28910 the load-multiple instructions. */
28911 if (!restoring_GPRs_inline)
28913 /* We are jumping to an out-of-line function. */
28914 rtx ptr_reg;
28915 int end_save = info->gp_save_offset + info->gp_size;
28916 bool can_use_exit = end_save == 0;
28917 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28918 int ptr_off;
28920 /* Emit stack reset code if we need it. */
28921 ptr_regno = ptr_regno_for_savres (sel);
28922 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28923 if (can_use_exit)
28924 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28925 else if (end_save + frame_off != 0)
28926 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28927 GEN_INT (end_save + frame_off)));
28928 else if (REGNO (frame_reg_rtx) != ptr_regno)
28929 emit_move_insn (ptr_reg, frame_reg_rtx);
28930 if (REGNO (frame_reg_rtx) == ptr_regno)
28931 frame_off = -end_save;
28933 if (can_use_exit && info->cr_save_p)
28934 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28936 ptr_off = -end_save;
28937 rs6000_emit_savres_rtx (info, ptr_reg,
28938 info->gp_save_offset + ptr_off,
28939 info->lr_save_offset + ptr_off,
28940 reg_mode, sel);
28942 else if (using_load_multiple)
28944 rtvec p;
28945 p = rtvec_alloc (32 - info->first_gp_reg_save);
28946 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28947 RTVEC_ELT (p, i)
28948 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28949 frame_reg_rtx,
28950 info->gp_save_offset + frame_off + reg_size * i);
28951 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28953 else
28955 int offset = info->gp_save_offset + frame_off;
28956 for (i = info->first_gp_reg_save; i < 32; i++)
28958 if (save_reg_p (i)
28959 && !cfun->machine->gpr_is_wrapped_separately[i])
28961 rtx reg = gen_rtx_REG (reg_mode, i);
28962 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28965 offset += reg_size;
28969 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28971 /* If the frame pointer was used then we can't delay emitting
28972 a REG_CFA_DEF_CFA note. This must happen on the insn that
28973 restores the frame pointer, r31. We may have already emitted
28974 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28975 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28976 be harmless if emitted. */
28977 if (frame_pointer_needed)
28979 insn = get_last_insn ();
28980 add_reg_note (insn, REG_CFA_DEF_CFA,
28981 plus_constant (Pmode, frame_reg_rtx, frame_off));
28982 RTX_FRAME_RELATED_P (insn) = 1;
28985 /* Set up cfa_restores. We always need these when
28986 shrink-wrapping. If not shrink-wrapping then we only need
28987 the cfa_restore when the stack location is no longer valid.
28988 The cfa_restores must be emitted on or before the insn that
28989 invalidates the stack, and of course must not be emitted
28990 before the insn that actually does the restore. The latter
28991 is why it is a bad idea to emit the cfa_restores as a group
28992 on the last instruction here that actually does a restore:
28993 That insn may be reordered with respect to others doing
28994 restores. */
28995 if (flag_shrink_wrap
28996 && !restoring_GPRs_inline
28997 && info->first_fp_reg_save == 64)
28998 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
29000 for (i = info->first_gp_reg_save; i < 32; i++)
29001 if (save_reg_p (i)
29002 && !cfun->machine->gpr_is_wrapped_separately[i])
29004 rtx reg = gen_rtx_REG (reg_mode, i);
29005 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29009 if (!restoring_GPRs_inline
29010 && info->first_fp_reg_save == 64)
29012 /* We are jumping to an out-of-line function. */
29013 if (cfa_restores)
29014 emit_cfa_restores (cfa_restores);
29015 return;
29018 if (restore_lr && !restoring_GPRs_inline)
29020 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
29021 restore_saved_lr (0, exit_func);
29024 /* Restore fpr's if we need to do it without calling a function. */
29025 if (restoring_FPRs_inline)
29027 int offset = info->fp_save_offset + frame_off;
29028 for (i = info->first_fp_reg_save; i < 64; i++)
29030 if (save_reg_p (i)
29031 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
29033 rtx reg = gen_rtx_REG (fp_reg_mode, i);
29034 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
29035 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
29036 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
29037 cfa_restores);
29040 offset += fp_reg_size;
29044 /* If we saved cr, restore it here. Just those that were used. */
29045 if (info->cr_save_p)
29046 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
29048 /* If this is V.4, unwind the stack pointer after all of the loads
29049 have been done, or set up r11 if we are restoring fp out of line. */
29050 ptr_regno = 1;
29051 if (!restoring_FPRs_inline)
29053 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29054 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
29055 ptr_regno = ptr_regno_for_savres (sel);
29058 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
29059 if (REGNO (frame_reg_rtx) == ptr_regno)
29060 frame_off = 0;
29062 if (insn && restoring_FPRs_inline)
29064 if (cfa_restores)
29066 REG_NOTES (insn) = cfa_restores;
29067 cfa_restores = NULL_RTX;
29069 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
29070 RTX_FRAME_RELATED_P (insn) = 1;
29073 if (crtl->calls_eh_return)
29075 rtx sa = EH_RETURN_STACKADJ_RTX;
29076 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
29079 if (!sibcall && restoring_FPRs_inline)
29081 if (cfa_restores)
29083 /* We can't hang the cfa_restores off a simple return,
29084 since the shrink-wrap code sometimes uses an existing
29085 return. This means there might be a path from
29086 pre-prologue code to this return, and dwarf2cfi code
29087 wants the eh_frame unwinder state to be the same on
29088 all paths to any point. So we need to emit the
29089 cfa_restores before the return. For -m64 we really
29090 don't need epilogue cfa_restores at all, except for
29091 this irritating dwarf2cfi-with-shrink-wrap
29092 requirement; the stack red-zone means eh_frame info
29093 from the prologue telling the unwinder to restore
29094 from the stack is perfectly good right to the end of
29095 the function. */
29096 emit_insn (gen_blockage ());
29097 emit_cfa_restores (cfa_restores);
29098 cfa_restores = NULL_RTX;
29101 emit_jump_insn (targetm.gen_simple_return ());
29104 if (!sibcall && !restoring_FPRs_inline)
29106 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29107 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
29108 int elt = 0;
29109 RTVEC_ELT (p, elt++) = ret_rtx;
29110 if (lr)
29111 RTVEC_ELT (p, elt++)
29112 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29114 /* We have to restore more than two FP registers, so branch to the
29115 restore function. It will return to our caller. */
29116 int i;
29117 int reg;
29118 rtx sym;
29120 if (flag_shrink_wrap)
29121 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
29123 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
29124 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
29125 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
29126 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
29128 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29130 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
29132 RTVEC_ELT (p, elt++)
29133 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
29134 if (flag_shrink_wrap
29135 && save_reg_p (info->first_fp_reg_save + i))
29136 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29139 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
29142 if (cfa_restores)
29144 if (sibcall)
29145 /* Ensure the cfa_restores are hung off an insn that won't
29146 be reordered above other restores. */
29147 emit_insn (gen_blockage ());
29149 emit_cfa_restores (cfa_restores);
29153 /* Write function epilogue. */
29155 static void
29156 rs6000_output_function_epilogue (FILE *file)
29158 #if TARGET_MACHO
29159 macho_branch_islands ();
29162 rtx_insn *insn = get_last_insn ();
29163 rtx_insn *deleted_debug_label = NULL;
29165 /* Mach-O doesn't support labels at the end of objects, so if
29166 it looks like we might want one, take special action.
29168 First, collect any sequence of deleted debug labels. */
29169 while (insn
29170 && NOTE_P (insn)
29171 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29173 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes, don't insert
29174 a nop; instead set their CODE_LABEL_NUMBER to -1, since
29175 otherwise there would be code generation differences
29176 between -g and -g0. */
29177 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29178 deleted_debug_label = insn;
29179 insn = PREV_INSN (insn);
29182 /* Second, if we have:
29183 label:
29184 barrier
29185 then this needs to be detected, so skip past the barrier. */
29187 if (insn && BARRIER_P (insn))
29188 insn = PREV_INSN (insn);
29190 /* Up to now we've only seen notes or barriers. */
29191 if (insn)
29193 if (LABEL_P (insn)
29194 || (NOTE_P (insn)
29195 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29196 /* Trailing label: <barrier>. */
29197 fputs ("\tnop\n", file);
29198 else
29200 /* Lastly, see if we have a completely empty function body. */
29201 while (insn && ! INSN_P (insn))
29202 insn = PREV_INSN (insn);
29203 /* If we don't find any insns, we've got an empty function body;
29204 i.e. completely empty, without a return or branch. This is
29205 taken as the case where a function body has been removed
29206 because it contains an inline __builtin_unreachable(). GCC
29207 states that reaching __builtin_unreachable() means UB so we're
29208 not obliged to do anything special; however, we want
29209 non-zero-sized function bodies. To meet this, and help the
29210 user out, let's trap the case. */
29211 if (insn == NULL)
29212 fputs ("\ttrap\n", file);
29215 else if (deleted_debug_label)
29216 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29217 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29218 CODE_LABEL_NUMBER (insn) = -1;
29220 #endif
29222 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29223 on its format.
29225 We don't output a traceback table if -finhibit-size-directive was
29226 used. The documentation for -finhibit-size-directive reads
29227 ``don't output a @code{.size} assembler directive, or anything
29228 else that would cause trouble if the function is split in the
29229 middle, and the two halves are placed at locations far apart in
29230 memory.'' The traceback table has this property, since it
29231 includes the offset from the start of the function to the
29232 traceback table itself.
29234 System V.4 PowerPC targets (and the embedded ABI derived from
29235 them) use a different traceback table. */
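/* Illustrative only, with field values invented for the example:
   for a small C function compiled without optimization that saves
   LR and r31 and pushes a frame, the block below emits something
   shaped like

	.long 0
	.byte 0,0,32,65,128,1,0,1

   i.e. the all-zero marker word followed by the format, language
   and packed flag bytes; the exact values depend on the traceback
   flags computed below.  */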
29236 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29237 && ! flag_inhibit_size_directive
29238 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29240 const char *fname = NULL;
29241 const char *language_string = lang_hooks.name;
29242 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29243 int i;
29244 int optional_tbtab;
29245 rs6000_stack_t *info = rs6000_stack_info ();
29247 if (rs6000_traceback == traceback_full)
29248 optional_tbtab = 1;
29249 else if (rs6000_traceback == traceback_part)
29250 optional_tbtab = 0;
29251 else
29252 optional_tbtab = !optimize_size && !TARGET_ELF;
29254 if (optional_tbtab)
29256 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29257 while (*fname == '.') /* V.4 encodes . in the name */
29258 fname++;
29260 /* Need label immediately before tbtab, so we can compute
29261 its offset from the function start. */
29262 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29263 ASM_OUTPUT_LABEL (file, fname);
29266 /* The .tbtab pseudo-op can only be used for the first eight
29267 expressions, since it can't handle the possibly variable
29268 length fields that follow. However, if you omit the optional
29269 fields, the assembler outputs zeros for all optional fields
29270 anyway, giving each variable-length field its minimum length
29271 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29272 pseudo-op at all. */
29274 /* An all-zero word flags the start of the tbtab, for debuggers
29275 that have to find it by searching forward from the entry
29276 point or from the current pc. */
29277 fputs ("\t.long 0\n", file);
29279 /* Tbtab format type. Use format type 0. */
29280 fputs ("\t.byte 0,", file);
29282 /* Language type. Unfortunately, there does not seem to be any
29283 official way to discover the language being compiled, so we
29284 use language_string.
29285 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
29286 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29287 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
29288 either, so for now use 0. */
29289 if (lang_GNU_C ()
29290 || ! strcmp (language_string, "GNU GIMPLE")
29291 || ! strcmp (language_string, "GNU Go")
29292 || ! strcmp (language_string, "libgccjit"))
29293 i = 0;
29294 else if (! strcmp (language_string, "GNU F77")
29295 || lang_GNU_Fortran ())
29296 i = 1;
29297 else if (! strcmp (language_string, "GNU Pascal"))
29298 i = 2;
29299 else if (! strcmp (language_string, "GNU Ada"))
29300 i = 3;
29301 else if (lang_GNU_CXX ()
29302 || ! strcmp (language_string, "GNU Objective-C++"))
29303 i = 9;
29304 else if (! strcmp (language_string, "GNU Java"))
29305 i = 13;
29306 else if (! strcmp (language_string, "GNU Objective-C"))
29307 i = 14;
29308 else
29309 gcc_unreachable ();
29310 fprintf (file, "%d,", i);
29312 /* 8 single bit fields: global linkage (not set for C extern linkage,
29313 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29314 from start of procedure stored in tbtab, internal function, function
29315 has controlled storage, function has no toc, function uses fp,
29316 function logs/aborts fp operations. */
29317 /* Assume that fp operations are used if any fp reg must be saved. */
29318 fprintf (file, "%d,",
29319 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29321 /* 6 bitfields: function is interrupt handler, name present in
29322 proc table, function calls alloca, on condition directives
29323 (controls stack walks, 3 bits), saves condition reg, saves
29324 link reg. */
29325 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29326 set up as a frame pointer, even when there is no alloca call. */
29327 fprintf (file, "%d,",
29328 ((optional_tbtab << 6)
29329 | ((optional_tbtab & frame_pointer_needed) << 5)
29330 | (info->cr_save_p << 1)
29331 | (info->lr_save_p)));
29333 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29334 (6 bits). */
29335 fprintf (file, "%d,",
29336 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29338 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29339 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29341 if (optional_tbtab)
29343 /* Compute the parameter info from the function decl argument
29344 list. */
29345 tree decl;
29346 int next_parm_info_bit = 31;
29348 for (decl = DECL_ARGUMENTS (current_function_decl);
29349 decl; decl = DECL_CHAIN (decl))
29351 rtx parameter = DECL_INCOMING_RTL (decl);
29352 machine_mode mode = GET_MODE (parameter);
29354 if (GET_CODE (parameter) == REG)
29356 if (SCALAR_FLOAT_MODE_P (mode))
29358 int bits;
29360 float_parms++;
29362 switch (mode)
29364 case E_SFmode:
29365 case E_SDmode:
29366 bits = 0x2;
29367 break;
29369 case E_DFmode:
29370 case E_DDmode:
29371 case E_TFmode:
29372 case E_TDmode:
29373 case E_IFmode:
29374 case E_KFmode:
29375 bits = 0x3;
29376 break;
29378 default:
29379 gcc_unreachable ();
29382 /* If only one bit will fit, don't or in this entry. */
29383 if (next_parm_info_bit > 0)
29384 parm_info |= (bits << (next_parm_info_bit - 1));
29385 next_parm_info_bit -= 2;
29387 else
29389 fixed_parms += ((GET_MODE_SIZE (mode)
29390 + (UNITS_PER_WORD - 1))
29391 / UNITS_PER_WORD);
29392 next_parm_info_bit -= 1;
29398 /* Number of fixed point parameters. */
29399 /* This is actually the number of words of fixed point parameters; thus
29400 an 8-byte struct counts as 2, and thus the maximum value is 8. */
29401 fprintf (file, "%d,", fixed_parms);
29403 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29404 all on stack. */
29405 /* This is actually the number of fp registers that hold parameters;
29406 and thus the maximum value is 13. */
29407 /* Set parameters on stack bit if parameters are not in their original
29408 registers, regardless of whether they are on the stack? Xlc
29409 seems to set the bit when not optimizing. */
29410 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29412 if (optional_tbtab)
29414 /* Optional fields follow. Some are variable length. */
29416 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29417 float, 11 double float. */
29418 /* There is an entry for each parameter in a register, in the order
29419 that they occur in the parameter list. Any intervening arguments
29420 on the stack are ignored. If the list overflows a long (max
29421 possible length 34 bits) then completely leave off all elements
29422 that don't fit. */
29423 /* Only emit this long if there was at least one parameter. */
29424 if (fixed_parms || float_parms)
29425 fprintf (file, "\t.long %d\n", parm_info);
29427 /* Offset from start of code to tb table. */
29428 fputs ("\t.long ", file);
29429 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29430 RS6000_OUTPUT_BASENAME (file, fname);
29431 putc ('-', file);
29432 rs6000_output_function_entry (file, fname);
29433 putc ('\n', file);
29435 /* Interrupt handler mask. */
29436 /* Omit this long, since we never set the interrupt handler bit
29437 above. */
29439 /* Number of CTL (controlled storage) anchors. */
29440 /* Omit this long, since the has_ctl bit is never set above. */
29442 /* Displacement into stack of each CTL anchor. */
29443 /* Omit this list of longs, because there are no CTL anchors. */
29445 /* Length of function name. */
29446 if (*fname == '*')
29447 ++fname;
29448 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29450 /* Function name. */
29451 assemble_string (fname, strlen (fname));
29453 /* Register for alloca automatic storage; this is always reg 31.
29454 Only emit this if the alloca bit was set above. */
29455 if (frame_pointer_needed)
29456 fputs ("\t.byte 31\n", file);
29458 fputs ("\t.align 2\n", file);
29462 /* Arrange to define .LCTOC1 label, if not already done. */
29463 if (need_toc_init)
29465 need_toc_init = 0;
29466 if (!toc_initialized)
29468 switch_to_section (toc_section);
29469 switch_to_section (current_function_section ());
29474 /* -fsplit-stack support. */
29476 /* A SYMBOL_REF for __morestack. */
29477 static GTY(()) rtx morestack_ref;
29479 static rtx
29480 gen_add3_const (rtx rt, rtx ra, long c)
29482 if (TARGET_64BIT)
29483 return gen_adddi3 (rt, ra, GEN_INT (c));
29484 else
29485 return gen_addsi3 (rt, ra, GEN_INT (c));
29488 /* Emit -fsplit-stack prologue, which goes before the regular function
29489 prologue (at local entry point in the case of ELFv2). */
29491 void
29492 rs6000_expand_split_stack_prologue (void)
29494 rs6000_stack_t *info = rs6000_stack_info ();
29495 unsigned HOST_WIDE_INT allocate;
29496 long alloc_hi, alloc_lo;
29497 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29498 rtx_insn *insn;
29500 gcc_assert (flag_split_stack && reload_completed);
29502 if (!info->push_p)
29503 return;
29505 if (global_regs[29])
29507 error ("%qs uses register r29", "-fsplit-stack");
29508 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29509 "conflicts with %qD", global_regs_decl[29]);
29512 allocate = info->total_size;
29513 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29515 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29516 return;
29518 if (morestack_ref == NULL_RTX)
29520 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29521 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29522 | SYMBOL_FLAG_FUNCTION);
29525 r0 = gen_rtx_REG (Pmode, 0);
29526 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29527 r12 = gen_rtx_REG (Pmode, 12);
29528 emit_insn (gen_load_split_stack_limit (r0));
29529 /* Always emit two insns here to calculate the requested stack
29530 pointer, so that the linker can edit them when adjusting the size
29531 for calling non-split-stack code. */
29532 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29533 alloc_lo = -allocate - alloc_hi;
29534 if (alloc_hi != 0)
29536 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29537 if (alloc_lo != 0)
29538 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29539 else
29540 emit_insn (gen_nop ());
29542 else
29544 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29545 emit_insn (gen_nop ());
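/* Illustrative only: for a 64-bit frame of 70000 bytes the two
   linker-editable insns above come out as

	addis r12,r1,-1
	addi r12,r12,-4464

   since -70000 splits into alloc_hi = -65536 and alloc_lo = -4464;
   a small frame instead gets a single addi plus a nop placeholder
   so there are always two insns for the linker to edit.  */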
29548 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29549 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29550 ok_label = gen_label_rtx ();
29551 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29552 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29553 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29554 pc_rtx);
29555 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29556 JUMP_LABEL (insn) = ok_label;
29557 /* Mark the jump as very likely to be taken. */
29558 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29560 lr = gen_rtx_REG (Pmode, LR_REGNO);
29561 insn = emit_move_insn (r0, lr);
29562 RTX_FRAME_RELATED_P (insn) = 1;
29563 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29564 RTX_FRAME_RELATED_P (insn) = 1;
29566 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29567 const0_rtx, const0_rtx));
29568 call_fusage = NULL_RTX;
29569 use_reg (&call_fusage, r12);
29570 /* Say the call uses r0, even though it doesn't, to stop regrename
29571 from twiddling with the insns saving lr, trashing args for cfun.
29572 The insns restoring lr are similarly protected by making
29573 split_stack_return use r0. */
29574 use_reg (&call_fusage, r0);
29575 add_function_usage_to (insn, call_fusage);
29576 /* Indicate that this function can't jump to non-local gotos. */
29577 make_reg_eh_region_note_nothrow_nononlocal (insn);
29578 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29579 insn = emit_move_insn (lr, r0);
29580 add_reg_note (insn, REG_CFA_RESTORE, lr);
29581 RTX_FRAME_RELATED_P (insn) = 1;
29582 emit_insn (gen_split_stack_return ());
29584 emit_label (ok_label);
29585 LABEL_NUSES (ok_label) = 1;
29588 /* Return the internal arg pointer used for function incoming
29589 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29590 to copy it to a pseudo in order for it to be preserved over calls
29591 and suchlike. We'd really like to use a pseudo here for the
29592 internal arg pointer but data-flow analysis is not prepared to
29593 accept pseudos as live at the beginning of a function. */
29595 static rtx
29596 rs6000_internal_arg_pointer (void)
29598 if (flag_split_stack
29599 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29600 == NULL))
29603 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29605 rtx pat;
29607 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29608 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29610 /* Put the pseudo initialization right after the note at the
29611 beginning of the function. */
29612 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29613 gen_rtx_REG (Pmode, 12));
29614 push_topmost_sequence ();
29615 emit_insn_after (pat, get_insns ());
29616 pop_topmost_sequence ();
29618 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29619 FIRST_PARM_OFFSET (current_function_decl));
29621 return virtual_incoming_args_rtx;
29624 /* We may have to tell the dataflow pass that the split stack prologue
29625 is initializing a register. */
29627 static void
29628 rs6000_live_on_entry (bitmap regs)
29630 if (flag_split_stack)
29631 bitmap_set_bit (regs, 12);
29634 /* Emit -fsplit-stack dynamic stack allocation space check. */
29636 void
29637 rs6000_split_stack_space_check (rtx size, rtx label)
29639 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29640 rtx limit = gen_reg_rtx (Pmode);
29641 rtx requested = gen_reg_rtx (Pmode);
29642 rtx cmp = gen_reg_rtx (CCUNSmode);
29643 rtx jump;
29645 emit_insn (gen_load_split_stack_limit (limit));
29646 if (CONST_INT_P (size))
29647 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29648 else
29650 size = force_reg (Pmode, size);
29651 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29653 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29654 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29655 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29656 gen_rtx_LABEL_REF (VOIDmode, label),
29657 pc_rtx);
29658 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29659 JUMP_LABEL (jump) = label;
29662 /* A C compound statement that outputs the assembler code for a thunk
29663 function, used to implement C++ virtual function calls with
29664 multiple inheritance. The thunk acts as a wrapper around a virtual
29665 function, adjusting the implicit object parameter before handing
29666 control off to the real function.
29668 First, emit code to add the integer DELTA to the location that
29669 contains the incoming first argument. Assume that this argument
29670 contains a pointer, and is the one used to pass the `this' pointer
29671 in C++. This is the incoming argument *before* the function
29672 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29673 values of all other incoming arguments.
29675 After the addition, emit code to jump to FUNCTION, which is a
29676 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29677 not touch the return address. Hence returning from FUNCTION will
29678 return to whoever called the current `thunk'.
29680 The effect must be as if FUNCTION had been called directly with the
29681 adjusted first argument. This macro is responsible for emitting
29682 all of the code for a thunk function; output_function_prologue()
29683 and output_function_epilogue() are not invoked.
29685 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29686 been extracted from it.) It might possibly be useful on some
29687 targets, but probably not.
29689 If you do not define this macro, the target-independent code in the
29690 C++ frontend will generate a less efficient heavyweight thunk that
29691 calls FUNCTION instead of jumping to it. The generic approach does
29692 not support varargs. */
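/* Illustrative only: such thunks typically arise from C++ multiple
   inheritance, e.g.

     struct A { virtual void f (); long a; };
     struct B { virtual void g (); long b; };
     struct C : A, B { void g (); };

   Calling g() through a B* that points into a C object enters a
   thunk that applies the (negative) offset of the B subobject to
   `this' (the DELTA below) and then jumps to C::g.  */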
29694 static void
29695 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29696 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29697 tree function)
29699 rtx this_rtx, funexp;
29700 rtx_insn *insn;
29702 reload_completed = 1;
29703 epilogue_completed = 1;
29705 /* Mark the end of the (empty) prologue. */
29706 emit_note (NOTE_INSN_PROLOGUE_END);
29708 /* Find the "this" pointer. If the function returns a structure,
29709 the structure return pointer is in r3. */
29710 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29711 this_rtx = gen_rtx_REG (Pmode, 4);
29712 else
29713 this_rtx = gen_rtx_REG (Pmode, 3);
29715 /* Apply the constant offset, if required. */
29716 if (delta)
29717 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29719 /* Apply the offset from the vtable, if required. */
29720 if (vcall_offset)
29722 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29723 rtx tmp = gen_rtx_REG (Pmode, 12);
29725 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29726 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29728 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29729 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29731 else
29733 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29735 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29737 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29740 /* Generate a tail call to the target function. */
29741 if (!TREE_USED (function))
29743 assemble_external (function);
29744 TREE_USED (function) = 1;
29746 funexp = XEXP (DECL_RTL (function), 0);
29747 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29749 #if TARGET_MACHO
29750 if (MACHOPIC_INDIRECT)
29751 funexp = machopic_indirect_call_target (funexp);
29752 #endif
29754 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29755 generate sibcall RTL explicitly. */
29756 insn = emit_call_insn (
29757 gen_rtx_PARALLEL (VOIDmode,
29758 gen_rtvec (3,
29759 gen_rtx_CALL (VOIDmode,
29760 funexp, const0_rtx),
29761 gen_rtx_USE (VOIDmode, const0_rtx),
29762 simple_return_rtx)));
29763 SIBLING_CALL_P (insn) = 1;
29764 emit_barrier ();
29766 /* Run just enough of rest_of_compilation to get the insns emitted.
29767 There's not really enough bulk here to make other passes such as
29768 instruction scheduling worthwhile. Note that use_thunk calls
29769 assemble_start_function and assemble_end_function. */
29770 insn = get_insns ();
29771 shorten_branches (insn);
29772 final_start_function (insn, file, 1);
29773 final (insn, file, 1);
29774 final_end_function ();
29776 reload_completed = 0;
29777 epilogue_completed = 0;
29780 /* A quick summary of the various types of 'constant-pool tables'
29781 under PowerPC:
29783 Target           Flags            Name             One table per
29784 AIX              (none)           AIX TOC          object file
29785 AIX              -mfull-toc       AIX TOC          object file
29786 AIX              -mminimal-toc    AIX minimal TOC  translation unit
29787 SVR4/EABI        (none)           SVR4 SDATA       object file
29788 SVR4/EABI        -fpic            SVR4 pic         object file
29789 SVR4/EABI        -fPIC            SVR4 PIC         translation unit
29790 SVR4/EABI        -mrelocatable    EABI TOC         function
29791 SVR4/EABI        -maix            AIX TOC          object file
29792 SVR4/EABI        -maix -mminimal-toc
29793                                   AIX minimal TOC  translation unit
29795 Name             Reg.  Set by  entries   contains:
29796                                made by   addrs?   fp?      sum?
29798 AIX TOC          2     crt0    as        Y        option   option
29799 AIX minimal TOC  30    prolog  gcc       Y        Y        option
29800 SVR4 SDATA       13    crt0    gcc       N        Y        N
29801 SVR4 pic         30    prolog  ld        Y        not yet  N
29802 SVR4 PIC         30    prolog  gcc       Y        option   option
29803 EABI TOC         30    prolog  gcc       Y        option   option
29807 /* Hash functions for the hash table. */
29809 static unsigned
29810 rs6000_hash_constant (rtx k)
29812 enum rtx_code code = GET_CODE (k);
29813 machine_mode mode = GET_MODE (k);
29814 unsigned result = (code << 3) ^ mode;
29815 const char *format;
29816 int flen, fidx;
29818 format = GET_RTX_FORMAT (code);
29819 flen = strlen (format);
29820 fidx = 0;
29822 switch (code)
29824 case LABEL_REF:
29825 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29827 case CONST_WIDE_INT:
29829 int i;
29830 flen = CONST_WIDE_INT_NUNITS (k);
29831 for (i = 0; i < flen; i++)
29832 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29833 return result;
29836 case CONST_DOUBLE:
29837 if (mode != VOIDmode)
29838 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29839 flen = 2;
29840 break;
29842 case CODE_LABEL:
29843 fidx = 3;
29844 break;
29846 default:
29847 break;
29850 for (; fidx < flen; fidx++)
29851 switch (format[fidx])
29853 case 's':
29855 unsigned i, len;
29856 const char *str = XSTR (k, fidx);
29857 len = strlen (str);
29858 result = result * 613 + len;
29859 for (i = 0; i < len; i++)
29860 result = result * 613 + (unsigned) str[i];
29861 break;
29863 case 'u':
29864 case 'e':
29865 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29866 break;
29867 case 'i':
29868 case 'n':
29869 result = result * 613 + (unsigned) XINT (k, fidx);
29870 break;
29871 case 'w':
29872 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29873 result = result * 613 + (unsigned) XWINT (k, fidx);
29874 else
29876 size_t i;
29877 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29878 result = result * 613 + (unsigned) (XWINT (k, fidx)
29879 >> CHAR_BIT * i);
29881 break;
29882 case '0':
29883 break;
29884 default:
29885 gcc_unreachable ();
29888 return result;
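/* Illustrative only: for a (const_int 42) key the switch above
   falls through to the format loop, whose 'w' case folds the
   value 42 into the running multiply-by-613 hash seeded with
   (CONST_INT << 3) ^ VOIDmode; toc_hasher::hash below then xors
   the result with the entry's key_mode.  */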
29891 hashval_t
29892 toc_hasher::hash (toc_hash_struct *thc)
29894 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29897 /* Compare H1 and H2 for equivalence. */
29899 bool
29900 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29902 rtx r1 = h1->key;
29903 rtx r2 = h2->key;
29905 if (h1->key_mode != h2->key_mode)
29906 return 0;
29908 return rtx_equal_p (r1, r2);
29911 /* These are the names given by the C++ front-end to vtables, and
29912 vtable-like objects. Ideally, this logic should not be here;
29913 instead, there should be some programmatic way of inquiring as
29914 to whether or not an object is a vtable. */
29916 #define VTABLE_NAME_P(NAME) \
29917 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29918 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29919 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29920 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29921 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
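/* Illustrative only: the Itanium C++ ABI mangles the vtable of a
   class A as "_ZTV1A", its VTT as "_ZTT1A", its typeinfo as
   "_ZTI1A" and its construction vtables with "_ZTC", all of which
   the test above accepts; "_vt." covers the older g++ mangling.
   Note the macro inspects the variable `name' from its expansion
   context rather than its NAME argument.  */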
29923 #ifdef NO_DOLLAR_IN_LABEL
29924 /* Return a GGC-allocated character string translating dollar signs in
29925 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29927 const char *
29928 rs6000_xcoff_strip_dollar (const char *name)
29930 char *strip, *p;
29931 const char *q;
29932 size_t len;
29934 q = (const char *) strchr (name, '$');
29936 if (q == 0 || q == name)
29937 return name;
29939 len = strlen (name);
29940 strip = XALLOCAVEC (char, len + 1);
29941 strcpy (strip, name);
29942 p = strip + (q - name);
29943 while (p)
29945 *p = '_';
29946 p = strchr (p + 1, '$');
29949 return ggc_alloc_string (strip, len);
29951 #endif
29953 void
29954 rs6000_output_symbol_ref (FILE *file, rtx x)
29956 const char *name = XSTR (x, 0);
29958 /* Currently C++ toc references to vtables can be emitted before it
29959 is decided whether the vtable is public or private. If this is
29960 the case, then the linker will eventually complain that there is
29961 a reference to an unknown section. Thus, for vtables only,
29962 we emit the TOC reference to reference the identifier and not the
29963 symbol. */
29964 if (VTABLE_NAME_P (name))
29966 RS6000_OUTPUT_BASENAME (file, name);
29968 else
29969 assemble_name (file, name);
29972 /* Output a TOC entry. We derive the entry name from what is being
29973 written. */
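/* Illustrative only: on 64-bit AIX with a full TOC, the double
   constant 1.0 would come out as

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   whereas -mminimal-toc or ELF emits just the raw value after
   DOUBLE_INT_ASM_OP.  */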
29975 void
29976 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29978 char buf[256];
29979 const char *name = buf;
29980 rtx base = x;
29981 HOST_WIDE_INT offset = 0;
29983 gcc_assert (!TARGET_NO_TOC);
29985 /* When the linker won't eliminate them, don't output duplicate
29986 TOC entries (this happens on AIX if there is any kind of TOC,
29987 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29988 CODE_LABELs. */
29989 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29991 struct toc_hash_struct *h;
29993 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29994 time because GGC is not initialized at that point. */
29995 if (toc_hash_table == NULL)
29996 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29998 h = ggc_alloc<toc_hash_struct> ();
29999 h->key = x;
30000 h->key_mode = mode;
30001 h->labelno = labelno;
30003 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
30004 if (*found == NULL)
30005 *found = h;
30006 else /* This is indeed a duplicate.
30007 Set this label equal to that label. */
30009 fputs ("\t.set ", file);
30010 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
30011 fprintf (file, "%d,", labelno);
30012 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
30013 fprintf (file, "%d\n", ((*found)->labelno));
30015 #ifdef HAVE_AS_TLS
30016 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
30017 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
30018 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
30020 fputs ("\t.set ", file);
30021 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
30022 fprintf (file, "%d,", labelno);
30023 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
30024 fprintf (file, "%d\n", ((*found)->labelno));
30026 #endif
30027 return;
30031 /* If we're going to put a double constant in the TOC, make sure it's
30032 aligned properly when strict alignment is on. */
30033 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
30034 && STRICT_ALIGNMENT
30035 && GET_MODE_BITSIZE (mode) >= 64
30036 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
30037 ASM_OUTPUT_ALIGN (file, 3);
30040 (*targetm.asm_out.internal_label) (file, "LC", labelno);
30042 /* Handle FP constants specially. Note that if we have a minimal
30043 TOC, things we put here aren't actually in the TOC, so we can allow
30044 FP constants. */
30045 if (GET_CODE (x) == CONST_DOUBLE &&
30046 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
30047 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
30049 long k[4];
30051 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30052 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
30053 else
30054 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
30056 if (TARGET_64BIT)
30058 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30059 fputs (DOUBLE_INT_ASM_OP, file);
30060 else
30061 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
30062 k[0] & 0xffffffff, k[1] & 0xffffffff,
30063 k[2] & 0xffffffff, k[3] & 0xffffffff);
30064 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
30065 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
30066 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
30067 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
30068 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
30069 return;
30071 else
30073 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30074 fputs ("\t.long ", file);
30075 else
30076 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
30077 k[0] & 0xffffffff, k[1] & 0xffffffff,
30078 k[2] & 0xffffffff, k[3] & 0xffffffff);
30079 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
30080 k[0] & 0xffffffff, k[1] & 0xffffffff,
30081 k[2] & 0xffffffff, k[3] & 0xffffffff);
30082 return;
30085 else if (GET_CODE (x) == CONST_DOUBLE &&
30086 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
30088 long k[2];
30090 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30091 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
30092 else
30093 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
30095 if (TARGET_64BIT)
30097 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30098 fputs (DOUBLE_INT_ASM_OP, file);
30099 else
30100 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30101 k[0] & 0xffffffff, k[1] & 0xffffffff);
30102 fprintf (file, "0x%lx%08lx\n",
30103 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
30104 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
30105 return;
30107 else
30109 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30110 fputs ("\t.long ", file);
30111 else
30112 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30113 k[0] & 0xffffffff, k[1] & 0xffffffff);
30114 fprintf (file, "0x%lx,0x%lx\n",
30115 k[0] & 0xffffffff, k[1] & 0xffffffff);
30116 return;
30119 else if (GET_CODE (x) == CONST_DOUBLE &&
30120 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
30122 long l;
30124 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30125 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
30126 else
30127 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
30129 if (TARGET_64BIT)
30131 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30132 fputs (DOUBLE_INT_ASM_OP, file);
30133 else
30134 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30135 if (WORDS_BIG_ENDIAN)
30136 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
30137 else
30138 fprintf (file, "0x%lx\n", l & 0xffffffff);
30139 return;
30141 else
30143 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30144 fputs ("\t.long ", file);
30145 else
30146 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30147 fprintf (file, "0x%lx\n", l & 0xffffffff);
30148 return;
30151 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
30153 unsigned HOST_WIDE_INT low;
30154 HOST_WIDE_INT high;
30156 low = INTVAL (x) & 0xffffffff;
30157 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30159 /* TOC entries are always Pmode-sized, so when big-endian
30160 smaller integer constants in the TOC need to be padded.
30161 (This is still a win over putting the constants in
30162 a separate constant pool, because then we'd have
30163 to have both a TOC entry _and_ the actual constant.)
30165 For a 32-bit target, CONST_INT values are loaded and shifted
30166 entirely within `low' and can be stored in one TOC entry. */
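/* Illustrative only: on big-endian powerpc64 ELF, an SImode entry
   for the constant 0x12345678 is left-justified by the shift
   below, producing

	.quad 0x1234567800000000

   so the 32-bit value occupies the high half of the Pmode-sized
   TOC slot.  */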
30168 /* It would be easy to make this work, but it doesn't now. */
30169 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30171 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30173 low |= high << 32;
30174 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30175 high = (HOST_WIDE_INT) low >> 32;
30176 low &= 0xffffffff;
30179 if (TARGET_64BIT)
30181 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30182 fputs (DOUBLE_INT_ASM_OP, file);
30183 else
30184 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30185 (long) high & 0xffffffff, (long) low & 0xffffffff);
30186 fprintf (file, "0x%lx%08lx\n",
30187 (long) high & 0xffffffff, (long) low & 0xffffffff);
30188 return;
30190 else
30192 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30194 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30195 fputs ("\t.long ", file);
30196 else
30197 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30198 (long) high & 0xffffffff, (long) low & 0xffffffff);
30199 fprintf (file, "0x%lx,0x%lx\n",
30200 (long) high & 0xffffffff, (long) low & 0xffffffff);
30202 else
30204 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30205 fputs ("\t.long ", file);
30206 else
30207 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30208 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30210 return;
30214 if (GET_CODE (x) == CONST)
30216 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30217 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
30219 base = XEXP (XEXP (x, 0), 0);
30220 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30223 switch (GET_CODE (base))
30225 case SYMBOL_REF:
30226 name = XSTR (base, 0);
30227 break;
30229 case LABEL_REF:
30230 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30231 CODE_LABEL_NUMBER (XEXP (base, 0)));
30232 break;
30234 case CODE_LABEL:
30235 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30236 break;
30238 default:
30239 gcc_unreachable ();
30242 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30243 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30244 else
30246 fputs ("\t.tc ", file);
30247 RS6000_OUTPUT_BASENAME (file, name);
30249 if (offset < 0)
30250 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30251 else if (offset)
30252 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30254 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30255 after other TOC symbols, reducing overflow of small TOC access
30256 to [TC] symbols. */
30257 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30258 ? "[TE]," : "[TC],", file);
30261 /* Currently C++ toc references to vtables can be emitted before it
30262 is decided whether the vtable is public or private. If this is
30263 the case, then the linker will eventually complain that there is
30264 a TOC reference to an unknown section. Thus, for vtables only,
30265 we emit the TOC reference to reference the symbol and not the
30266 section. */
30267 if (VTABLE_NAME_P (name))
30269 RS6000_OUTPUT_BASENAME (file, name);
30270 if (offset < 0)
30271 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30272 else if (offset > 0)
30273 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30275 else
30276 output_addr_const (file, x);
30278 #if HAVE_AS_TLS
30279 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30281 switch (SYMBOL_REF_TLS_MODEL (base))
30283 case 0:
30284 break;
30285 case TLS_MODEL_LOCAL_EXEC:
30286 fputs ("@le", file);
30287 break;
30288 case TLS_MODEL_INITIAL_EXEC:
30289 fputs ("@ie", file);
30290 break;
30291 /* Use global-dynamic for local-dynamic. */
30292 case TLS_MODEL_GLOBAL_DYNAMIC:
30293 case TLS_MODEL_LOCAL_DYNAMIC:
30294 putc ('\n', file);
30295 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30296 fputs ("\t.tc .", file);
30297 RS6000_OUTPUT_BASENAME (file, name);
30298 fputs ("[TC],", file);
30299 output_addr_const (file, x);
30300 fputs ("@m", file);
30301 break;
30302 default:
30303 gcc_unreachable ();
30306 #endif
30308 putc ('\n', file);
30311 /* Output an assembler pseudo-op to write an ASCII string of N characters
30312 starting at P to FILE.
30314 On the RS/6000, we have to do this using the .byte operation and
30315 write out special characters outside the quoted string.
30316 Also, the assembler is broken; very long strings are truncated,
30317 so we must artificially break them up early. */
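/* Illustrative only: for the four input bytes A " B \n the loop
   below produces

	.byte "A""B"
	.byte 10

   doubling the embedded quote and switching to decimal output for
   the unprintable newline.  */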
30319 void
30320 output_ascii (FILE *file, const char *p, int n)
30322 char c;
30323 int i, count_string;
30324 const char *for_string = "\t.byte \"";
30325 const char *for_decimal = "\t.byte ";
30326 const char *to_close = NULL;
30328 count_string = 0;
30329 for (i = 0; i < n; i++)
30331 c = *p++;
30332 if (c >= ' ' && c < 0177)
30334 if (for_string)
30335 fputs (for_string, file);
30336 putc (c, file);
30338 /* Write two quotes to get one. */
30339 if (c == '"')
30341 putc (c, file);
30342 ++count_string;
30345 for_string = NULL;
30346 for_decimal = "\"\n\t.byte ";
30347 to_close = "\"\n";
30348 ++count_string;
30350 if (count_string >= 512)
30352 fputs (to_close, file);
30354 for_string = "\t.byte \"";
30355 for_decimal = "\t.byte ";
30356 to_close = NULL;
30357 count_string = 0;
30360 else
30362 if (for_decimal)
30363 fputs (for_decimal, file);
30364 fprintf (file, "%d", c);
30366 for_string = "\n\t.byte \"";
30367 for_decimal = ", ";
30368 to_close = "\n";
30369 count_string = 0;
30373 /* Now close the string if we have written one. Then end the line. */
30374 if (to_close)
30375 fputs (to_close, file);
30378 /* Generate a unique section name for FILENAME for a section type
30379 represented by SECTION_DESC. Output goes into BUF.
30381 SECTION_DESC can be any string, as long as it is different for each
30382 possible section type.
30384 We name the section in the same manner as xlc. The name begins with an
30385 underscore followed by the filename (after stripping any leading directory
30386 names) with the last period replaced by the string SECTION_DESC. If
30387 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30388 the name. */
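/* Illustrative only: FILENAME "dir/foo.c" with SECTION_DESC "bss_"
   yields the buffer "_foobss_"; the directory prefix is stripped,
   and the last period together with everything after it is
   replaced by the descriptor.  */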
30390 void
30391 rs6000_gen_section_name (char **buf, const char *filename,
30392 const char *section_desc)
30394 const char *q, *after_last_slash, *last_period = 0;
30395 char *p;
30396 int len;
30398 after_last_slash = filename;
30399 for (q = filename; *q; q++)
30401 if (*q == '/')
30402 after_last_slash = q + 1;
30403 else if (*q == '.')
30404 last_period = q;
30407 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30408 *buf = (char *) xmalloc (len);
30410 p = *buf;
30411 *p++ = '_';
30413 for (q = after_last_slash; *q; q++)
30415 if (q == last_period)
30417 strcpy (p, section_desc);
30418 p += strlen (section_desc);
30419 break;
30422 else if (ISALNUM (*q))
30423 *p++ = *q;
30426 if (last_period == 0)
30427 strcpy (p, section_desc);
30428 else
30429 *p = '\0';
30432 /* Emit profile function. */
30434 void
30435 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30437 /* Non-standard profiling for kernels, which just saves LR then calls
30438 _mcount without worrying about arg saves. The idea is to change
30439 the function prologue as little as possible as it isn't easy to
30440 account for arg save/restore code added just for _mcount. */
30441 if (TARGET_PROFILE_KERNEL)
30442 return;
30444 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30446 #ifndef NO_PROFILE_COUNTERS
30447 # define NO_PROFILE_COUNTERS 0
30448 #endif
30449 if (NO_PROFILE_COUNTERS)
30450 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30451 LCT_NORMAL, VOIDmode);
30452 else
30454 char buf[30];
30455 const char *label_name;
30456 rtx fun;
30458 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30459 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30460 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30462 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30463 LCT_NORMAL, VOIDmode, fun, Pmode);
30466 else if (DEFAULT_ABI == ABI_DARWIN)
30468 const char *mcount_name = RS6000_MCOUNT;
30469 int caller_addr_regno = LR_REGNO;
30471 /* Be conservative and always set this, at least for now. */
30472 crtl->uses_pic_offset_table = 1;
30474 #if TARGET_MACHO
30475 /* For PIC code, set up a stub and collect the caller's address
30476 from r0, which is where the prologue puts it. */
30477 if (MACHOPIC_INDIRECT
30478 && crtl->uses_pic_offset_table)
30479 caller_addr_regno = 0;
30480 #endif
30481 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30482 LCT_NORMAL, VOIDmode,
30483 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30487 /* Write function profiler code. */
30489 void
30490 output_function_profiler (FILE *file, int labelno)
30492 char buf[100];
30494 switch (DEFAULT_ABI)
30496 default:
30497 gcc_unreachable ();
30499 case ABI_V4:
30500 if (!TARGET_32BIT)
30502 warning (0, "no profiling of 64-bit code for this ABI");
30503 return;
30505 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30506 fprintf (file, "\tmflr %s\n", reg_names[0]);
30507 if (NO_PROFILE_COUNTERS)
30509 asm_fprintf (file, "\tstw %s,4(%s)\n",
30510 reg_names[0], reg_names[1]);
30512 else if (TARGET_SECURE_PLT && flag_pic)
30514 if (TARGET_LINK_STACK)
30516 char name[32];
30517 get_ppc476_thunk_name (name);
30518 asm_fprintf (file, "\tbl %s\n", name);
30520 else
30521 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30522 asm_fprintf (file, "\tstw %s,4(%s)\n",
30523 reg_names[0], reg_names[1]);
30524 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30525 asm_fprintf (file, "\taddis %s,%s,",
30526 reg_names[12], reg_names[12]);
30527 assemble_name (file, buf);
30528 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30529 assemble_name (file, buf);
30530 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30532 else if (flag_pic == 1)
30534 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30535 asm_fprintf (file, "\tstw %s,4(%s)\n",
30536 reg_names[0], reg_names[1]);
30537 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30538 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30539 assemble_name (file, buf);
30540 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30542 else if (flag_pic > 1)
30544 asm_fprintf (file, "\tstw %s,4(%s)\n",
30545 reg_names[0], reg_names[1]);
30546 /* Now, we need to get the address of the label. */
30547 if (TARGET_LINK_STACK)
30549 char name[32];
30550 get_ppc476_thunk_name (name);
30551 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30552 assemble_name (file, buf);
30553 fputs ("-.\n1:", file);
30554 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30555 asm_fprintf (file, "\taddi %s,%s,4\n",
30556 reg_names[11], reg_names[11]);
30558 else
30560 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30561 assemble_name (file, buf);
30562 fputs ("-.\n1:", file);
30563 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30565 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30566 reg_names[0], reg_names[11]);
30567 asm_fprintf (file, "\tadd %s,%s,%s\n",
30568 reg_names[0], reg_names[0], reg_names[11]);
30570 else
30572 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30573 assemble_name (file, buf);
30574 fputs ("@ha\n", file);
30575 asm_fprintf (file, "\tstw %s,4(%s)\n",
30576 reg_names[0], reg_names[1]);
30577 asm_fprintf (file, "\tla %s,", reg_names[0]);
30578 assemble_name (file, buf);
30579 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30582 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30583 fprintf (file, "\tbl %s%s\n",
30584 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30585 break;
30587 case ABI_AIX:
30588 case ABI_ELFv2:
30589 case ABI_DARWIN:
30590 /* Don't do anything, done in output_profile_hook (). */
30591 break;
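/* Editorial illustration, not from the original source: for 32-bit
   ABI_V4 with flag_pic == 1 and counter label "LP0", the asm_fprintf
   calls above emit a sequence along these lines (register naming
   simplified to r0/r1/r12; RS6000_MCOUNT is believed to be "_mcount"
   on SVR4 targets):

	mflr r0
	bl _GLOBAL_OFFSET_TABLE_@local-4
	stw r0,4(r1)
	mflr r12
	lwz r0,LP0@got(r12)
	bl _mcount@plt
*/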
30597 /* The following variable holds the last issued insn. */
30599 static rtx_insn *last_scheduled_insn;
30601 /* The following variable helps to balance the issuing of load and
30602 store instructions.  */
30604 static int load_store_pendulum;
30606 /* The following variable helps pair divide insns during scheduling. */
30607 static int divide_cnt;
30608 /* The following variable helps pair and alternate vector and vector load
30609 insns during scheduling. */
30610 static int vec_pairing;
30613 /* Power4 load update and store update instructions are cracked into a
30614 load or store and an integer insn which are executed in the same cycle.
30615 Branches have their own dispatch slot which does not count against the
30616 GCC issue rate, but it changes the program flow so there are no other
30617 instructions to issue in this cycle. */
30619 static int
30620 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30622 last_scheduled_insn = insn;
30623 if (GET_CODE (PATTERN (insn)) == USE
30624 || GET_CODE (PATTERN (insn)) == CLOBBER)
30626 cached_can_issue_more = more;
30627 return cached_can_issue_more;
30630 if (insn_terminates_group_p (insn, current_group))
30632 cached_can_issue_more = 0;
30633 return cached_can_issue_more;
30636 /* If the insn has no reservation but we reach here, leave the count unchanged. */
30637 if (recog_memoized (insn) < 0)
30638 return more;
30640 if (rs6000_sched_groups)
30642 if (is_microcoded_insn (insn))
30643 cached_can_issue_more = 0;
30644 else if (is_cracked_insn (insn))
30645 cached_can_issue_more = more > 2 ? more - 2 : 0;
30646 else
30647 cached_can_issue_more = more - 1;
30649 return cached_can_issue_more;
30652 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30653 return 0;
30655 cached_can_issue_more = more - 1;
30656 return cached_can_issue_more;
30659 static int
30660 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30662 int r = rs6000_variable_issue_1 (insn, more);
30663 if (verbose)
30664 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30665 return r;
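/* Editorial sketch, not from the original source: on the dispatch-group
   targets handled above, the issue bookkeeping reduces to a small
   slot-accounting function.  A hypothetical standalone model: */
#if 0
enum insn_class { NORMAL, CRACKED, MICROCODED };

static int
slots_left_after_issue (enum insn_class c, int more)
{
  switch (c)
    {
    case MICROCODED:
      return 0;				/* occupies a whole dispatch group */
    case CRACKED:
      return more > 2 ? more - 2 : 0;	/* two internal ops, two slots */
    default:
      return more - 1;			/* one issue slot */
    }
}
#endif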
30668 /* Adjust the cost of a scheduling dependency. Return the new cost of
30669 a dependency of INSN on DEP_INSN. COST is the current cost. */
30671 static int
30672 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30673 unsigned int)
30675 enum attr_type attr_type;
30677 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30678 return cost;
30680 switch (dep_type)
30682 case REG_DEP_TRUE:
30684 /* Data dependency; DEP_INSN writes a register that INSN reads
30685 some cycles later. */
30687 /* Separate a load from a narrower, dependent store. */
30688 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30689 && GET_CODE (PATTERN (insn)) == SET
30690 && GET_CODE (PATTERN (dep_insn)) == SET
30691 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30692 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30693 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30694 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30695 return cost + 14;
30697 attr_type = get_attr_type (insn);
30699 switch (attr_type)
30701 case TYPE_JMPREG:
30702 /* Tell the first scheduling pass about the latency between
30703 a mtctr and bctr (and mtlr and br/blr). The first
30704 scheduling pass will not know about this latency since
30705 the mtctr instruction, which has the latency associated
30706 with it, will be generated by reload. */
30707 return 4;
30708 case TYPE_BRANCH:
30709 /* Leave some extra cycles between a compare and its
30710 dependent branch, to inhibit expensive mispredicts. */
30711 if ((rs6000_tune == PROCESSOR_PPC603
30712 || rs6000_tune == PROCESSOR_PPC604
30713 || rs6000_tune == PROCESSOR_PPC604e
30714 || rs6000_tune == PROCESSOR_PPC620
30715 || rs6000_tune == PROCESSOR_PPC630
30716 || rs6000_tune == PROCESSOR_PPC750
30717 || rs6000_tune == PROCESSOR_PPC7400
30718 || rs6000_tune == PROCESSOR_PPC7450
30719 || rs6000_tune == PROCESSOR_PPCE5500
30720 || rs6000_tune == PROCESSOR_PPCE6500
30721 || rs6000_tune == PROCESSOR_POWER4
30722 || rs6000_tune == PROCESSOR_POWER5
30723 || rs6000_tune == PROCESSOR_POWER7
30724 || rs6000_tune == PROCESSOR_POWER8
30725 || rs6000_tune == PROCESSOR_POWER9
30726 || rs6000_tune == PROCESSOR_CELL)
30727 && recog_memoized (dep_insn)
30728 && (INSN_CODE (dep_insn) >= 0))
30730 switch (get_attr_type (dep_insn))
30732 case TYPE_CMP:
30733 case TYPE_FPCOMPARE:
30734 case TYPE_CR_LOGICAL:
30735 return cost + 2;
30736 case TYPE_EXTS:
30737 case TYPE_MUL:
30738 if (get_attr_dot (dep_insn) == DOT_YES)
30739 return cost + 2;
30740 else
30741 break;
30742 case TYPE_SHIFT:
30743 if (get_attr_dot (dep_insn) == DOT_YES
30744 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30745 return cost + 2;
30746 else
30747 break;
30748 default:
30749 break;
30751 break;
30753 case TYPE_STORE:
30754 case TYPE_FPSTORE:
30755 if ((rs6000_tune == PROCESSOR_POWER6)
30756 && recog_memoized (dep_insn)
30757 && (INSN_CODE (dep_insn) >= 0))
30760 if (GET_CODE (PATTERN (insn)) != SET)
30761 /* If this happens, we have to extend this to schedule
30762 optimally. Return default for now. */
30763 return cost;
30765 /* Adjust the cost for the case where the value written
30766 by a fixed point operation is used as the address
30767 gen value on a store. */
30768 switch (get_attr_type (dep_insn))
30770 case TYPE_LOAD:
30771 case TYPE_CNTLZ:
30773 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30774 return get_attr_sign_extend (dep_insn)
30775 == SIGN_EXTEND_YES ? 6 : 4;
30776 break;
30778 case TYPE_SHIFT:
30780 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30781 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30782 6 : 3;
30783 break;
30785 case TYPE_INTEGER:
30786 case TYPE_ADD:
30787 case TYPE_LOGICAL:
30788 case TYPE_EXTS:
30789 case TYPE_INSERT:
30791 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30792 return 3;
30793 break;
30795 case TYPE_STORE:
30796 case TYPE_FPLOAD:
30797 case TYPE_FPSTORE:
30799 if (get_attr_update (dep_insn) == UPDATE_YES
30800 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30801 return 3;
30802 break;
30804 case TYPE_MUL:
30806 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30807 return 17;
30808 break;
30810 case TYPE_DIV:
30812 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30813 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30814 break;
30816 default:
30817 break;
30820 break;
30822 case TYPE_LOAD:
30823 if ((rs6000_tune == PROCESSOR_POWER6)
30824 && recog_memoized (dep_insn)
30825 && (INSN_CODE (dep_insn) >= 0))
30828 /* Adjust the cost for the case where the value written
30829 by a fixed point instruction is used within the address
30830 gen portion of a subsequent load(u)(x) */
30831 switch (get_attr_type (dep_insn))
30833 case TYPE_LOAD:
30834 case TYPE_CNTLZ:
30836 if (set_to_load_agen (dep_insn, insn))
30837 return get_attr_sign_extend (dep_insn)
30838 == SIGN_EXTEND_YES ? 6 : 4;
30839 break;
30841 case TYPE_SHIFT:
30843 if (set_to_load_agen (dep_insn, insn))
30844 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30845 6 : 3;
30846 break;
30848 case TYPE_INTEGER:
30849 case TYPE_ADD:
30850 case TYPE_LOGICAL:
30851 case TYPE_EXTS:
30852 case TYPE_INSERT:
30854 if (set_to_load_agen (dep_insn, insn))
30855 return 3;
30856 break;
30858 case TYPE_STORE:
30859 case TYPE_FPLOAD:
30860 case TYPE_FPSTORE:
30862 if (get_attr_update (dep_insn) == UPDATE_YES
30863 && set_to_load_agen (dep_insn, insn))
30864 return 3;
30865 break;
30867 case TYPE_MUL:
30869 if (set_to_load_agen (dep_insn, insn))
30870 return 17;
30871 break;
30873 case TYPE_DIV:
30875 if (set_to_load_agen (dep_insn, insn))
30876 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30877 break;
30879 default:
30880 break;
30883 break;
30885 case TYPE_FPLOAD:
30886 if ((rs6000_tune == PROCESSOR_POWER6)
30887 && get_attr_update (insn) == UPDATE_NO
30888 && recog_memoized (dep_insn)
30889 && (INSN_CODE (dep_insn) >= 0)
30890 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30891 return 2;
30893 default:
30894 break;
30897 /* Fall out to return default cost. */
30899 break;
30901 case REG_DEP_OUTPUT:
30902 /* Output dependency; DEP_INSN writes a register that INSN writes some
30903 cycles later. */
30904 if ((rs6000_tune == PROCESSOR_POWER6)
30905 && recog_memoized (dep_insn)
30906 && (INSN_CODE (dep_insn) >= 0))
30908 attr_type = get_attr_type (insn);
30910 switch (attr_type)
30912 case TYPE_FP:
30913 case TYPE_FPSIMPLE:
30914 if (get_attr_type (dep_insn) == TYPE_FP
30915 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30916 return 1;
30917 break;
30918 case TYPE_FPLOAD:
30919 if (get_attr_update (insn) == UPDATE_NO
30920 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30921 return 2;
30922 break;
30923 default:
30924 break;
30927 /* Fall through, no cost for output dependency. */
30928 /* FALLTHRU */
30930 case REG_DEP_ANTI:
30931 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30932 cycles later. */
30933 return 0;
30935 default:
30936 gcc_unreachable ();
30939 return cost;
30942 /* Debug version of rs6000_adjust_cost. */
30944 static int
30945 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30946 int cost, unsigned int dw)
30948 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30950 if (ret != cost)
30952 const char *dep;
30954 switch (dep_type)
30956 default: dep = "unknown dependency"; break;
30957 case REG_DEP_TRUE: dep = "data dependency"; break;
30958 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30959 case REG_DEP_ANTI: dep = "anti dependency"; break;
30962 fprintf (stderr,
30963 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30964 "%s, insn:\n", ret, cost, dep);
30966 debug_rtx (insn);
30969 return ret;
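/* Editorial sketch, not from the original source: the POWER6
   store-address-generation penalties above can be read as a small cost
   table keyed on the producer's insn type.  A simplified distillation
   (ignoring the shift and update subcases; parameter names are
   hypothetical): */
#if 0
static int
p6_store_agen_cost (enum attr_type producer, int sign_extend_p,
		    int size_32_p)
{
  switch (producer)
    {
    case TYPE_LOAD:
      return sign_extend_p ? 6 : 4;
    case TYPE_MUL:
      return 17;
    case TYPE_DIV:
      return size_32_p ? 45 : 57;
    default:
      return 3;			/* simple fixed-point producers */
    }
}
#endif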
30972 /* Return true if INSN is microcoded.
30973 Return false otherwise. */
30975 static bool
30976 is_microcoded_insn (rtx_insn *insn)
30978 if (!insn || !NONDEBUG_INSN_P (insn)
30979 || GET_CODE (PATTERN (insn)) == USE
30980 || GET_CODE (PATTERN (insn)) == CLOBBER)
30981 return false;
30983 if (rs6000_tune == PROCESSOR_CELL)
30984 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30986 if (rs6000_sched_groups
30987 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30989 enum attr_type type = get_attr_type (insn);
30990 if ((type == TYPE_LOAD
30991 && get_attr_update (insn) == UPDATE_YES
30992 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30993 || ((type == TYPE_LOAD || type == TYPE_STORE)
30994 && get_attr_update (insn) == UPDATE_YES
30995 && get_attr_indexed (insn) == INDEXED_YES)
30996 || type == TYPE_MFCR)
30997 return true;
31000 return false;
31003 /* Return true if INSN is cracked into 2 instructions
31004 by the processor (and therefore occupies 2 issue slots). */
31006 static bool
31007 is_cracked_insn (rtx_insn *insn)
31009 if (!insn || !NONDEBUG_INSN_P (insn)
31010 || GET_CODE (PATTERN (insn)) == USE
31011 || GET_CODE (PATTERN (insn)) == CLOBBER)
31012 return false;
31014 if (rs6000_sched_groups
31015 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
31017 enum attr_type type = get_attr_type (insn);
31018 if ((type == TYPE_LOAD
31019 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31020 && get_attr_update (insn) == UPDATE_NO)
31021 || (type == TYPE_LOAD
31022 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
31023 && get_attr_update (insn) == UPDATE_YES
31024 && get_attr_indexed (insn) == INDEXED_NO)
31025 || (type == TYPE_STORE
31026 && get_attr_update (insn) == UPDATE_YES
31027 && get_attr_indexed (insn) == INDEXED_NO)
31028 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
31029 && get_attr_update (insn) == UPDATE_YES)
31030 || (type == TYPE_CR_LOGICAL
31031 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
31032 || (type == TYPE_EXTS
31033 && get_attr_dot (insn) == DOT_YES)
31034 || (type == TYPE_SHIFT
31035 && get_attr_dot (insn) == DOT_YES
31036 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
31037 || (type == TYPE_MUL
31038 && get_attr_dot (insn) == DOT_YES)
31039 || type == TYPE_DIV
31040 || (type == TYPE_INSERT
31041 && get_attr_size (insn) == SIZE_32))
31042 return true;
31045 return false;
31048 /* Return true if INSN can be issued only from
31049 the branch slot. */
31051 static bool
31052 is_branch_slot_insn (rtx_insn *insn)
31054 if (!insn || !NONDEBUG_INSN_P (insn)
31055 || GET_CODE (PATTERN (insn)) == USE
31056 || GET_CODE (PATTERN (insn)) == CLOBBER)
31057 return false;
31059 if (rs6000_sched_groups)
31061 enum attr_type type = get_attr_type (insn);
31062 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
31063 return true;
31064 return false;
31067 return false;
31070 /* Return true if OUT_INSN sets a value that is
31071 used in the address generation computation of IN_INSN.  */
31072 static bool
31073 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
31075 rtx out_set, in_set;
31077 /* For performance reasons, only handle the simple case where
31078 both insns are a single_set. */
31079 out_set = single_set (out_insn);
31080 if (out_set)
31082 in_set = single_set (in_insn);
31083 if (in_set)
31084 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
31087 return false;
31090 /* Try to determine the base/offset/size parts of the given MEM.
31091 Return true if successful, false if any of the values couldn't
31092 be determined.
31094 This function only looks for REG or REG+CONST address forms.
31095 The REG+REG address form will return false. */
31097 static bool
31098 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
31099 HOST_WIDE_INT *size)
31101 rtx addr_rtx;
31102 if (MEM_SIZE_KNOWN_P (mem))
31103 *size = MEM_SIZE (mem);
31104 else
31105 return false;
31107 addr_rtx = (XEXP (mem, 0));
31108 if (GET_CODE (addr_rtx) == PRE_MODIFY)
31109 addr_rtx = XEXP (addr_rtx, 1);
31111 *offset = 0;
31112 while (GET_CODE (addr_rtx) == PLUS
31113 && CONST_INT_P (XEXP (addr_rtx, 1)))
31115 *offset += INTVAL (XEXP (addr_rtx, 1));
31116 addr_rtx = XEXP (addr_rtx, 0);
31118 if (!REG_P (addr_rtx))
31119 return false;
31121 *base = addr_rtx;
31122 return true;
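/* Editorial sketch, not from the original source: a miniature analogue
   of get_memref_parts over a hypothetical toy address tree.  PLUS
   nodes with constant right-hand operands are peeled off and summed;
   the walk succeeds only if a bare register remains at the bottom: */
#if 0
struct toy_addr
{
  enum { TOY_REG, TOY_PLUS } kind;
  int regno;			/* valid for TOY_REG */
  struct toy_addr *lhs;		/* valid for TOY_PLUS */
  long rhs_const;		/* valid for TOY_PLUS */
};

static int
toy_memref_parts (const struct toy_addr *a, int *base_regno, long *offset)
{
  *offset = 0;
  while (a->kind == TOY_PLUS)
    {
      *offset += a->rhs_const;
      a = a->lhs;
    }
  if (a->kind != TOY_REG)
    return 0;
  *base_regno = a->regno;
  return 1;
}
#endif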
31125 /* Return true if the target storage location of
31126 MEM1 is adjacent to the target storage location of MEM2.  */
31129 static bool
31130 adjacent_mem_locations (rtx mem1, rtx mem2)
31132 rtx reg1, reg2;
31133 HOST_WIDE_INT off1, size1, off2, size2;
31135 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31136 && get_memref_parts (mem2, &reg2, &off2, &size2))
31137 return ((REGNO (reg1) == REGNO (reg2))
31138 && ((off1 + size1 == off2)
31139 || (off2 + size2 == off1)));
31141 return false;
31144 /* This function returns true if it can be determined that the two MEM
31145 locations overlap by at least 1 byte based on base reg/offset/size. */
31147 static bool
31148 mem_locations_overlap (rtx mem1, rtx mem2)
31150 rtx reg1, reg2;
31151 HOST_WIDE_INT off1, size1, off2, size2;
31153 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31154 && get_memref_parts (mem2, &reg2, &off2, &size2))
31155 return ((REGNO (reg1) == REGNO (reg2))
31156 && (((off1 <= off2) && (off1 + size1 > off2))
31157 || ((off2 <= off1) && (off2 + size2 > off1))));
31159 return false;
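/* Editorial sketch, not from the original source: once the two MEMs
   share a base register, both predicates above reduce to arithmetic on
   half-open intervals [offset, offset + size): */
#if 0
static int
intervals_adjacent_p (long off1, long size1, long off2, long size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static int
intervals_overlap_p (long off1, long size1, long off2, long size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}
#endif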
31162 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
31163 Increase the priority to execute INSN earlier, reduce the
31164 priority to execute INSN later. */
31168 static int
31169 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
31171 rtx load_mem, str_mem;
31172 /* On machines (like the 750) which have asymmetric integer units,
31173 where one integer unit can do multiplies and divides and the other
31174 can't, reduce the priority of multiply/divide so it is scheduled
31175 before other integer operations. */
31177 #if 0
31178 if (! INSN_P (insn))
31179 return priority;
31181 if (GET_CODE (PATTERN (insn)) == USE)
31182 return priority;
31184 switch (rs6000_tune) {
31185 case PROCESSOR_PPC750:
31186 switch (get_attr_type (insn))
31188 default:
31189 break;
31191 case TYPE_MUL:
31192 case TYPE_DIV:
31193 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31194 priority, priority);
31195 if (priority >= 0 && priority < 0x01000000)
31196 priority >>= 3;
31197 break;
31200 #endif
31202 if (insn_must_be_first_in_group (insn)
31203 && reload_completed
31204 && current_sched_info->sched_max_insns_priority
31205 && rs6000_sched_restricted_insns_priority)
31208 /* Prioritize insns that can be dispatched only in the first
31209 dispatch slot. */
31210 if (rs6000_sched_restricted_insns_priority == 1)
31211 /* Attach highest priority to insn. This means that in
31212 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31213 precede 'priority' (critical path) considerations. */
31214 return current_sched_info->sched_max_insns_priority;
31215 else if (rs6000_sched_restricted_insns_priority == 2)
31216 /* Increase priority of insn by a minimal amount. This means that in
31217 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31218 considerations precede dispatch-slot restriction considerations. */
31219 return (priority + 1);
31222 if (rs6000_tune == PROCESSOR_POWER6
31223 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31224 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31225 /* Attach highest priority to insn if the scheduler has just issued two
31226 stores and this instruction is a load, or two loads and this instruction
31227 is a store. Power6 wants loads and stores scheduled alternately
31228 when possible */
31229 return current_sched_info->sched_max_insns_priority;
31231 return priority;
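/* Editorial sketch, not from the original source: the two
   restricted-insn priority policies above, restated as a standalone
   function (MAX_PRIO stands in for sched_max_insns_priority).  Policy 1
   makes the dispatch-slot restriction dominate the critical-path
   priority; policy 2 only breaks ties in its favor: */
#if 0
static int
restricted_insn_priority (int policy, int priority, int max_prio)
{
  if (policy == 1)
    return max_prio;
  if (policy == 2)
    return priority + 1;
  return priority;
}
#endif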
31234 /* Return true if the instruction is nonpipelined on the Cell. */
31235 static bool
31236 is_nonpipeline_insn (rtx_insn *insn)
31238 enum attr_type type;
31239 if (!insn || !NONDEBUG_INSN_P (insn)
31240 || GET_CODE (PATTERN (insn)) == USE
31241 || GET_CODE (PATTERN (insn)) == CLOBBER)
31242 return false;
31244 type = get_attr_type (insn);
31245 if (type == TYPE_MUL
31246 || type == TYPE_DIV
31247 || type == TYPE_SDIV
31248 || type == TYPE_DDIV
31249 || type == TYPE_SSQRT
31250 || type == TYPE_DSQRT
31251 || type == TYPE_MFCR
31252 || type == TYPE_MFCRF
31253 || type == TYPE_MFJMPR)
31255 return true;
31257 return false;
31261 /* Return how many instructions the machine can issue per cycle. */
31263 static int
31264 rs6000_issue_rate (void)
31266 /* Unless scheduling for register pressure, use an issue rate of 1 for
31267 the first scheduling pass to decrease degradation. */
31268 if (!reload_completed && !flag_sched_pressure)
31269 return 1;
31271 switch (rs6000_tune) {
31272 case PROCESSOR_RS64A:
31273 case PROCESSOR_PPC601: /* ? */
31274 case PROCESSOR_PPC7450:
31275 return 3;
31276 case PROCESSOR_PPC440:
31277 case PROCESSOR_PPC603:
31278 case PROCESSOR_PPC750:
31279 case PROCESSOR_PPC7400:
31280 case PROCESSOR_PPC8540:
31281 case PROCESSOR_PPC8548:
31282 case PROCESSOR_CELL:
31283 case PROCESSOR_PPCE300C2:
31284 case PROCESSOR_PPCE300C3:
31285 case PROCESSOR_PPCE500MC:
31286 case PROCESSOR_PPCE500MC64:
31287 case PROCESSOR_PPCE5500:
31288 case PROCESSOR_PPCE6500:
31289 case PROCESSOR_TITAN:
31290 return 2;
31291 case PROCESSOR_PPC476:
31292 case PROCESSOR_PPC604:
31293 case PROCESSOR_PPC604e:
31294 case PROCESSOR_PPC620:
31295 case PROCESSOR_PPC630:
31296 return 4;
31297 case PROCESSOR_POWER4:
31298 case PROCESSOR_POWER5:
31299 case PROCESSOR_POWER6:
31300 case PROCESSOR_POWER7:
31301 return 5;
31302 case PROCESSOR_POWER8:
31303 return 7;
31304 case PROCESSOR_POWER9:
31305 return 6;
31306 default:
31307 return 1;
31311 /* Return how many instructions to look ahead for better insn
31312 scheduling. */
31314 static int
31315 rs6000_use_sched_lookahead (void)
31317 switch (rs6000_tune)
31319 case PROCESSOR_PPC8540:
31320 case PROCESSOR_PPC8548:
31321 return 4;
31323 case PROCESSOR_CELL:
31324 return (reload_completed ? 8 : 0);
31326 default:
31327 return 0;
31331 /* We are choosing an insn from the ready queue. Return zero if INSN can be
31332 chosen. */
31333 static int
31334 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31336 if (ready_index == 0)
31337 return 0;
31339 if (rs6000_tune != PROCESSOR_CELL)
31340 return 0;
31342 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31344 if (!reload_completed
31345 || is_nonpipeline_insn (insn)
31346 || is_microcoded_insn (insn))
31347 return 1;
31349 return 0;
31352 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31353 and return true. */
31355 static bool
31356 find_mem_ref (rtx pat, rtx *mem_ref)
31358 const char * fmt;
31359 int i, j;
31361 /* stack_tie does not produce any real memory traffic. */
31362 if (tie_operand (pat, VOIDmode))
31363 return false;
31365 if (GET_CODE (pat) == MEM)
31367 *mem_ref = pat;
31368 return true;
31371 /* Recursively process the pattern. */
31372 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31374 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31376 if (fmt[i] == 'e')
31378 if (find_mem_ref (XEXP (pat, i), mem_ref))
31379 return true;
31381 else if (fmt[i] == 'E')
31382 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31384 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31385 return true;
31389 return false;
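/* Editorial sketch, not from the original source: the same depth-first
   search written for a hypothetical binary expression tree;
   find_mem_ref generalizes this walk to arbitrary RTX arities via
   GET_RTX_FORMAT: */
#if 0
struct toy_node
{
  int is_mem;
  struct toy_node *op[2];	/* NULL when an operand is absent */
};

static const struct toy_node *
toy_find_mem (const struct toy_node *n)
{
  if (n == NULL)
    return NULL;
  if (n->is_mem)
    return n;
  for (int i = 0; i < 2; i++)
    {
      const struct toy_node *m = toy_find_mem (n->op[i]);
      if (m != NULL)
	return m;
    }
  return NULL;
}
#endif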
31392 /* Determine if PAT is a PATTERN of a load insn. */
31394 static bool
31395 is_load_insn1 (rtx pat, rtx *load_mem)
31397 if (!pat)
31398 return false;
31400 if (GET_CODE (pat) == SET)
31401 return find_mem_ref (SET_SRC (pat), load_mem);
31403 if (GET_CODE (pat) == PARALLEL)
31405 int i;
31407 for (i = 0; i < XVECLEN (pat, 0); i++)
31408 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31409 return true;
31412 return false;
31415 /* Determine if INSN loads from memory. */
31417 static bool
31418 is_load_insn (rtx insn, rtx *load_mem)
31420 if (!insn || !INSN_P (insn))
31421 return false;
31423 if (CALL_P (insn))
31424 return false;
31426 return is_load_insn1 (PATTERN (insn), load_mem);
31429 /* Determine if PAT is a PATTERN of a store insn. */
31431 static bool
31432 is_store_insn1 (rtx pat, rtx *str_mem)
31434 if (!pat)
31435 return false;
31437 if (GET_CODE (pat) == SET)
31438 return find_mem_ref (SET_DEST (pat), str_mem);
31440 if (GET_CODE (pat) == PARALLEL)
31442 int i;
31444 for (i = 0; i < XVECLEN (pat, 0); i++)
31445 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31446 return true;
31449 return false;
31452 /* Determine if INSN stores to memory. */
31454 static bool
31455 is_store_insn (rtx insn, rtx *str_mem)
31457 if (!insn || !INSN_P (insn))
31458 return false;
31460 return is_store_insn1 (PATTERN (insn), str_mem);
31463 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31465 static bool
31466 is_power9_pairable_vec_type (enum attr_type type)
31468 switch (type)
31470 case TYPE_VECSIMPLE:
31471 case TYPE_VECCOMPLEX:
31472 case TYPE_VECDIV:
31473 case TYPE_VECCMP:
31474 case TYPE_VECPERM:
31475 case TYPE_VECFLOAT:
31476 case TYPE_VECFDIV:
31477 case TYPE_VECDOUBLE:
31478 return true;
31479 default:
31480 break;
31482 return false;
31485 /* Returns whether the dependence between INSN and NEXT is considered
31486 costly by the given target. */
31488 static bool
31489 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31491 rtx insn;
31492 rtx next;
31493 rtx load_mem, str_mem;
31495 /* If the flag is not enabled - no dependence is considered costly;
31496 allow all dependent insns in the same group.
31497 This is the most aggressive option. */
31498 if (rs6000_sched_costly_dep == no_dep_costly)
31499 return false;
31501 /* If the flag is set to 1 - a dependence is always considered costly;
31502 do not allow dependent instructions in the same group.
31503 This is the most conservative option. */
31504 if (rs6000_sched_costly_dep == all_deps_costly)
31505 return true;
31507 insn = DEP_PRO (dep);
31508 next = DEP_CON (dep);
31510 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31511 && is_load_insn (next, &load_mem)
31512 && is_store_insn (insn, &str_mem))
31513 /* Prevent load after store in the same group. */
31514 return true;
31516 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31517 && is_load_insn (next, &load_mem)
31518 && is_store_insn (insn, &str_mem)
31519 && DEP_TYPE (dep) == REG_DEP_TRUE
31520 && mem_locations_overlap(str_mem, load_mem))
31521 /* Prevent load after store in the same group if it is a true
31522 dependence. */
31523 return true;
31525 /* The flag is set to X; dependences with latency >= X are considered costly,
31526 and will not be scheduled in the same group. */
31527 if (rs6000_sched_costly_dep <= max_dep_latency
31528 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31529 return true;
31531 return false;
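/* Editorial sketch, not from the original source: the decision ladder
   above, condensed into one predicate.  POLICY is a hypothetical
   numeric encoding of rs6000_sched_costly_dep (0 = no_dep_costly,
   1 = all_deps_costly, and so on); large values act as a latency
   cutoff, as in the final test above: */
#if 0
static int
dep_is_costly_p (int policy, int cost, int distance,
		 int store_to_load_p, int true_dep_p, int overlap_p)
{
  if (policy == 0)
    return 0;
  if (policy == 1)
    return 1;
  if (policy == 2 && store_to_load_p)
    return 1;
  if (policy == 3 && store_to_load_p && true_dep_p && overlap_p)
    return 1;
  return cost - distance >= policy;
}
#endif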
31534 /* Return the next insn after INSN that is found before TAIL is reached,
31535 skipping any "non-active" insns - insns that will not actually occupy
31536 an issue slot. Return NULL_RTX if such an insn is not found. */
31538 static rtx_insn *
31539 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31541 if (insn == NULL_RTX || insn == tail)
31542 return NULL;
31544 while (1)
31546 insn = NEXT_INSN (insn);
31547 if (insn == NULL_RTX || insn == tail)
31548 return NULL;
31550 if (CALL_P (insn)
31551 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31552 || (NONJUMP_INSN_P (insn)
31553 && GET_CODE (PATTERN (insn)) != USE
31554 && GET_CODE (PATTERN (insn)) != CLOBBER
31555 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31556 break;
31558 return insn;
31561 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31563 static int
31564 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31566 int pos;
31567 int i;
31568 rtx_insn *tmp;
31569 enum attr_type type, type2;
31571 type = get_attr_type (last_scheduled_insn);
31573 /* Try to issue fixed point divides back-to-back in pairs so they will be
31574 routed to separate execution units and execute in parallel. */
31575 if (type == TYPE_DIV && divide_cnt == 0)
31577 /* First divide has been scheduled. */
31578 divide_cnt = 1;
31580 /* Scan the ready list looking for another divide, if found move it
31581 to the end of the list so it is chosen next. */
31582 pos = lastpos;
31583 while (pos >= 0)
31585 if (recog_memoized (ready[pos]) >= 0
31586 && get_attr_type (ready[pos]) == TYPE_DIV)
31588 tmp = ready[pos];
31589 for (i = pos; i < lastpos; i++)
31590 ready[i] = ready[i + 1];
31591 ready[lastpos] = tmp;
31592 break;
31594 pos--;
31597 else
31599 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31600 divide_cnt = 0;
31602 /* The best dispatch throughput for vector and vector load insns can be
31603 achieved by interleaving a vector and vector load such that they'll
31604 dispatch to the same superslice. If this pairing cannot be achieved
31605 then it is best to pair vector insns together and vector load insns
31606 together.
31608 To aid in this pairing, vec_pairing maintains the current state with
31609 the following values:
31611 0 : Initial state, no vecload/vector pairing has been started.
31613 1 : A vecload or vector insn has been issued and a candidate for
31614 pairing has been found and moved to the end of the ready
31615 list. */
31616 if (type == TYPE_VECLOAD)
31618 /* Issued a vecload. */
31619 if (vec_pairing == 0)
31621 int vecload_pos = -1;
31622 /* We issued a single vecload, look for a vector insn to pair it
31623 with. If one isn't found, try to pair another vecload. */
31624 pos = lastpos;
31625 while (pos >= 0)
31627 if (recog_memoized (ready[pos]) >= 0)
31629 type2 = get_attr_type (ready[pos]);
31630 if (is_power9_pairable_vec_type (type2))
31632 /* Found a vector insn to pair with, move it to the
31633 end of the ready list so it is scheduled next. */
31634 tmp = ready[pos];
31635 for (i = pos; i < lastpos; i++)
31636 ready[i] = ready[i + 1];
31637 ready[lastpos] = tmp;
31638 vec_pairing = 1;
31639 return cached_can_issue_more;
31641 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31642 /* Remember position of first vecload seen. */
31643 vecload_pos = pos;
31645 pos--;
31647 if (vecload_pos >= 0)
31649 /* Didn't find a vector to pair with but did find a vecload,
31650 move it to the end of the ready list. */
31651 tmp = ready[vecload_pos];
31652 for (i = vecload_pos; i < lastpos; i++)
31653 ready[i] = ready[i + 1];
31654 ready[lastpos] = tmp;
31655 vec_pairing = 1;
31656 return cached_can_issue_more;
31660 else if (is_power9_pairable_vec_type (type))
31662 /* Issued a vector operation. */
31663 if (vec_pairing == 0)
31665 int vec_pos = -1;
31666 /* We issued a single vector insn, look for a vecload to pair it
31667 with. If one isn't found, try to pair another vector. */
31668 pos = lastpos;
31669 while (pos >= 0)
31671 if (recog_memoized (ready[pos]) >= 0)
31673 type2 = get_attr_type (ready[pos]);
31674 if (type2 == TYPE_VECLOAD)
31676 /* Found a vecload insn to pair with, move it to the
31677 end of the ready list so it is scheduled next. */
31678 tmp = ready[pos];
31679 for (i = pos; i < lastpos; i++)
31680 ready[i] = ready[i + 1];
31681 ready[lastpos] = tmp;
31682 vec_pairing = 1;
31683 return cached_can_issue_more;
31685 else if (is_power9_pairable_vec_type (type2)
31686 && vec_pos == -1)
31687 /* Remember position of first vector insn seen. */
31688 vec_pos = pos;
31690 pos--;
31692 if (vec_pos >= 0)
31694 /* Didn't find a vecload to pair with but did find a vector
31695 insn, move it to the end of the ready list. */
31696 tmp = ready[vec_pos];
31697 for (i = vec_pos; i < lastpos; i++)
31698 ready[i] = ready[i + 1];
31699 ready[lastpos] = tmp;
31700 vec_pairing = 1;
31701 return cached_can_issue_more;
31706 /* We've either finished a vec/vecload pair, couldn't find an insn to
31707 continue the current pair, or the last insn had nothing to do with
31708 pairing. In any case, reset the state. */
31709 vec_pairing = 0;
31712 return cached_can_issue_more;
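/* Editorial sketch, not from the original source: the "move to the end
   of the ready list" idiom used several times above.  The scheduler
   picks insns from the end of the array, so the chosen element is
   rotated to the last position, preserving the relative order of
   everything else: */
#if 0
static void
rotate_to_end (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif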
31715 /* We are about to begin issuing insns for this clock cycle. */
31717 static int
31718 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31719 rtx_insn **ready ATTRIBUTE_UNUSED,
31720 int *pn_ready ATTRIBUTE_UNUSED,
31721 int clock_var ATTRIBUTE_UNUSED)
31723 int n_ready = *pn_ready;
31725 if (sched_verbose)
31726 fprintf (dump, "// rs6000_sched_reorder :\n");
31728 /* Reorder the ready list, if the second to last ready insn
31729 is a nonpipelined insn. */
31730 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31732 if (is_nonpipeline_insn (ready[n_ready - 1])
31733 && (recog_memoized (ready[n_ready - 2]) > 0))
31734 /* Simply swap first two insns. */
31735 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31738 if (rs6000_tune == PROCESSOR_POWER6)
31739 load_store_pendulum = 0;
31741 return rs6000_issue_rate ();
31744 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31746 static int
31747 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31748 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31750 if (sched_verbose)
31751 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31753 /* For Power6, we need to handle some special cases to try to keep the
31754 store queue from overflowing and triggering expensive flushes.
31756 This code monitors how load and store instructions are being issued
31757 and skews the ready list one way or the other to increase the likelihood
31758 that a desired instruction is issued at the proper time.
31760 A couple of things are done. First, we maintain a "load_store_pendulum"
31761 to track the current state of load/store issue.
31763 - If the pendulum is at zero, then no loads or stores have been
31764 issued in the current cycle so we do nothing.
31766 - If the pendulum is 1, then a single load has been issued in this
31767 cycle and we attempt to locate another load in the ready list to
31768 issue with it.
31770 - If the pendulum is -2, then two stores have already been
31771 issued in this cycle, so we increase the priority of the first load
31772 in the ready list to increase its likelihood of being chosen first
31773 in the next cycle.
31775 - If the pendulum is -1, then a single store has been issued in this
31776 cycle and we attempt to locate another store in the ready list to
31777 issue with it, preferring a store to an adjacent memory location to
31778 facilitate store pairing in the store queue.
31780 - If the pendulum is 2, then two loads have already been
31781 issued in this cycle, so we increase the priority of the first store
31782 in the ready list to increase its likelihood of being chosen first
31783 in the next cycle.
31785 - If the pendulum < -2 or > 2, then do nothing.
31787 Note: This code covers the most common scenarios. There exist non
31788 load/store instructions which make use of the LSU and which
31789 would need to be accounted for to strictly model the behavior
31790 of the machine. Those instructions are currently unaccounted
31791 for to help minimize compile time overhead of this code. */
31793 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31795 int pos;
31796 int i;
31797 rtx_insn *tmp;
31798 rtx load_mem, str_mem;
31800 if (is_store_insn (last_scheduled_insn, &str_mem))
31801 /* Issuing a store, swing the load_store_pendulum to the left */
31802 load_store_pendulum--;
31803 else if (is_load_insn (last_scheduled_insn, &load_mem))
31804 /* Issuing a load, swing the load_store_pendulum to the right */
31805 load_store_pendulum++;
31806 else
31807 return cached_can_issue_more;
31809 /* If the pendulum is balanced, or there is only one instruction on
31810 the ready list, then all is well, so return. */
31811 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31812 return cached_can_issue_more;
31814 if (load_store_pendulum == 1)
31816 /* A load has been issued in this cycle. Scan the ready list
31817 for another load to issue with it */
31818 pos = *pn_ready-1;
31820 while (pos >= 0)
31822 if (is_load_insn (ready[pos], &load_mem))
31824 /* Found a load. Move it to the head of the ready list,
31825 and adjust its priority so that it is more likely to
31826 stay there */
31827 tmp = ready[pos];
31828 for (i = pos; i < *pn_ready - 1; i++)
31829 ready[i] = ready[i + 1];
31830 ready[*pn_ready-1] = tmp;
31832 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31833 INSN_PRIORITY (tmp)++;
31834 break;
31836 pos--;
31839 else if (load_store_pendulum == -2)
31841 /* Two stores have been issued in this cycle. Increase the
31842 priority of the first load in the ready list to favor it for
31843 issuing in the next cycle. */
31844 pos = *pn_ready-1;
31846 while (pos >= 0)
31848 if (is_load_insn (ready[pos], &load_mem)
31849 && !sel_sched_p ()
31850 && INSN_PRIORITY_KNOWN (ready[pos]))
31852 INSN_PRIORITY (ready[pos])++;
31854 /* Adjust the pendulum to account for the fact that a load
31855 was found and increased in priority. This is to prevent
31856 increasing the priority of multiple loads */
31857 load_store_pendulum--;
31859 break;
31861 pos--;
31864 else if (load_store_pendulum == -1)
31866 /* A store has been issued in this cycle. Scan the ready list for
31867 another store to issue with it, preferring a store to an adjacent
31868 memory location */
31869 int first_store_pos = -1;
31871 pos = *pn_ready-1;
31873 while (pos >= 0)
31875 if (is_store_insn (ready[pos], &str_mem))
31877 rtx str_mem2;
31878 /* Maintain the index of the first store found on the
31879 list */
31880 if (first_store_pos == -1)
31881 first_store_pos = pos;
31883 if (is_store_insn (last_scheduled_insn, &str_mem2)
31884 && adjacent_mem_locations (str_mem, str_mem2))
31886 /* Found an adjacent store. Move it to the head of the
31887 ready list, and adjust its priority so that it is
31888 more likely to stay there */
31889 tmp = ready[pos];
31890 for (i = pos; i < *pn_ready - 1; i++)
31891 ready[i] = ready[i + 1];
31892 ready[*pn_ready-1] = tmp;
31894 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31895 INSN_PRIORITY (tmp)++;
31897 first_store_pos = -1;
31899 break;
31902 pos--;
31905 if (first_store_pos >= 0)
31907 /* An adjacent store wasn't found, but a non-adjacent store was,
31908 so move the non-adjacent store to the front of the ready
31909 list, and adjust its priority so that it is more likely to
31910 stay there. */
31911 tmp = ready[first_store_pos];
31912 for (i = first_store_pos; i < *pn_ready - 1; i++)
31913 ready[i] = ready[i + 1];
31914 ready[*pn_ready-1] = tmp;
31915 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31916 INSN_PRIORITY (tmp)++;
31919 else if (load_store_pendulum == 2)
31921 /* Two loads have been issued in this cycle. Increase the priority
31922 of the first store in the ready list to favor it for issuing in
31923 the next cycle. */
31924 pos = *pn_ready-1;
31926 while (pos >= 0)
31928 if (is_store_insn (ready[pos], &str_mem)
31929 && !sel_sched_p ()
31930 && INSN_PRIORITY_KNOWN (ready[pos]))
31932 INSN_PRIORITY (ready[pos])++;
31934 /* Adjust the pendulum to account for the fact that a store
31935 was found and increased in priority. This is to prevent
31936 increasing the priority of multiple stores */
31937 load_store_pendulum++;
31939 break;
31941 pos--;
31946 /* Do Power9 dependent reordering if necessary. */
31947 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31948 && recog_memoized (last_scheduled_insn) >= 0)
31949 return power9_sched_reorder2 (ready, *pn_ready - 1);
31951 return cached_can_issue_more;
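/* Editorial sketch, not from the original source: the POWER6 pendulum
   update in isolation.  Issuing a load swings the pendulum positive
   and a store negative; at +/-1 the code above hunts for a partner of
   the same kind, and at +/-2 it boosts the opposite kind for the next
   cycle: */
#if 0
enum mem_kind { MK_OTHER, MK_LOAD, MK_STORE };

static int
pendulum_next (int pendulum, enum mem_kind k)
{
  if (k == MK_LOAD)
    return pendulum + 1;
  if (k == MK_STORE)
    return pendulum - 1;
  return pendulum;
}
#endif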
31954 /* Return whether the presence of INSN causes a dispatch group termination
31955 of group WHICH_GROUP.
31957 If WHICH_GROUP == current_group, this function will return true if INSN
31958 causes the termination of the current group (i.e., the dispatch group to
31959 which INSN belongs). This means that INSN will be the last insn in the
31960 group it belongs to.
31962 If WHICH_GROUP == previous_group, this function will return true if INSN
31963 causes the termination of the previous group (i.e., the dispatch group that
31964 precedes the group to which INSN belongs). This means that INSN will be
31965 the first insn in the group it belongs to.  */
31967 static bool
31968 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31970 bool first, last;
31972 if (! insn)
31973 return false;
31975 first = insn_must_be_first_in_group (insn);
31976 last = insn_must_be_last_in_group (insn);
31978 if (first && last)
31979 return true;
31981 if (which_group == current_group)
31982 return last;
31983 else if (which_group == previous_group)
31984 return first;
31986 return false;
31990 static bool
31991 insn_must_be_first_in_group (rtx_insn *insn)
31993 enum attr_type type;
31995 if (!insn
31996 || NOTE_P (insn)
31997 || DEBUG_INSN_P (insn)
31998 || GET_CODE (PATTERN (insn)) == USE
31999 || GET_CODE (PATTERN (insn)) == CLOBBER)
32000 return false;
32002 switch (rs6000_tune)
32004 case PROCESSOR_POWER5:
32005 if (is_cracked_insn (insn))
32006 return true;
32007 /* FALLTHRU */
32008 case PROCESSOR_POWER4:
32009 if (is_microcoded_insn (insn))
32010 return true;
32012 if (!rs6000_sched_groups)
32013 return false;
32015 type = get_attr_type (insn);
32017 switch (type)
32019 case TYPE_MFCR:
32020 case TYPE_MFCRF:
32021 case TYPE_MTCR:
32022 case TYPE_CR_LOGICAL:
32023 case TYPE_MTJMPR:
32024 case TYPE_MFJMPR:
32025 case TYPE_DIV:
32026 case TYPE_LOAD_L:
32027 case TYPE_STORE_C:
32028 case TYPE_ISYNC:
32029 case TYPE_SYNC:
32030 return true;
32031 default:
32032 break;
32034 break;
32035 case PROCESSOR_POWER6:
32036 type = get_attr_type (insn);
32038 switch (type)
32040 case TYPE_EXTS:
32041 case TYPE_CNTLZ:
32042 case TYPE_TRAP:
32043 case TYPE_MUL:
32044 case TYPE_INSERT:
32045 case TYPE_FPCOMPARE:
32046 case TYPE_MFCR:
32047 case TYPE_MTCR:
32048 case TYPE_MFJMPR:
32049 case TYPE_MTJMPR:
32050 case TYPE_ISYNC:
32051 case TYPE_SYNC:
32052 case TYPE_LOAD_L:
32053 case TYPE_STORE_C:
32054 return true;
32055 case TYPE_SHIFT:
32056 if (get_attr_dot (insn) == DOT_NO
32057 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32058 return true;
32059 else
32060 break;
32061 case TYPE_DIV:
32062 if (get_attr_size (insn) == SIZE_32)
32063 return true;
32064 else
32065 break;
32066 case TYPE_LOAD:
32067 case TYPE_STORE:
32068 case TYPE_FPLOAD:
32069 case TYPE_FPSTORE:
32070 if (get_attr_update (insn) == UPDATE_YES)
32071 return true;
32072 else
32073 break;
32074 default:
32075 break;
32077 break;
32078 case PROCESSOR_POWER7:
32079 type = get_attr_type (insn);
32081 switch (type)
32083 case TYPE_CR_LOGICAL:
32084 case TYPE_MFCR:
32085 case TYPE_MFCRF:
32086 case TYPE_MTCR:
32087 case TYPE_DIV:
32088 case TYPE_ISYNC:
32089 case TYPE_LOAD_L:
32090 case TYPE_STORE_C:
32091 case TYPE_MFJMPR:
32092 case TYPE_MTJMPR:
32093 return true;
32094 case TYPE_MUL:
32095 case TYPE_SHIFT:
32096 case TYPE_EXTS:
32097 if (get_attr_dot (insn) == DOT_YES)
32098 return true;
32099 else
32100 break;
32101 case TYPE_LOAD:
32102 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32103 || get_attr_update (insn) == UPDATE_YES)
32104 return true;
32105 else
32106 break;
32107 case TYPE_STORE:
32108 case TYPE_FPLOAD:
32109 case TYPE_FPSTORE:
32110 if (get_attr_update (insn) == UPDATE_YES)
32111 return true;
32112 else
32113 break;
32114 default:
32115 break;
32117 break;
32118 case PROCESSOR_POWER8:
32119 type = get_attr_type (insn);
32121 switch (type)
32123 case TYPE_CR_LOGICAL:
32124 case TYPE_MFCR:
32125 case TYPE_MFCRF:
32126 case TYPE_MTCR:
32127 case TYPE_SYNC:
32128 case TYPE_ISYNC:
32129 case TYPE_LOAD_L:
32130 case TYPE_STORE_C:
32131 case TYPE_VECSTORE:
32132 case TYPE_MFJMPR:
32133 case TYPE_MTJMPR:
32134 return true;
32135 case TYPE_SHIFT:
32136 case TYPE_EXTS:
32137 case TYPE_MUL:
32138 if (get_attr_dot (insn) == DOT_YES)
32139 return true;
32140 else
32141 break;
32142 case TYPE_LOAD:
32143 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32144 || get_attr_update (insn) == UPDATE_YES)
32145 return true;
32146 else
32147 break;
32148 case TYPE_STORE:
32149 if (get_attr_update (insn) == UPDATE_YES
32150 && get_attr_indexed (insn) == INDEXED_YES)
32151 return true;
32152 else
32153 break;
32154 default:
32155 break;
32157 break;
32158 default:
32159 break;
32162 return false;
32165 static bool
32166 insn_must_be_last_in_group (rtx_insn *insn)
32168 enum attr_type type;
32170 if (!insn
32171 || NOTE_P (insn)
32172 || DEBUG_INSN_P (insn)
32173 || GET_CODE (PATTERN (insn)) == USE
32174 || GET_CODE (PATTERN (insn)) == CLOBBER)
32175 return false;
32177 switch (rs6000_tune) {
32178 case PROCESSOR_POWER4:
32179 case PROCESSOR_POWER5:
32180 if (is_microcoded_insn (insn))
32181 return true;
32183 if (is_branch_slot_insn (insn))
32184 return true;
32186 break;
32187 case PROCESSOR_POWER6:
32188 type = get_attr_type (insn);
32190 switch (type)
32192 case TYPE_EXTS:
32193 case TYPE_CNTLZ:
32194 case TYPE_TRAP:
32195 case TYPE_MUL:
32196 case TYPE_FPCOMPARE:
32197 case TYPE_MFCR:
32198 case TYPE_MTCR:
32199 case TYPE_MFJMPR:
32200 case TYPE_MTJMPR:
32201 case TYPE_ISYNC:
32202 case TYPE_SYNC:
32203 case TYPE_LOAD_L:
32204 case TYPE_STORE_C:
32205 return true;
32206 case TYPE_SHIFT:
32207 if (get_attr_dot (insn) == DOT_NO
32208 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32209 return true;
32210 else
32211 break;
32212 case TYPE_DIV:
32213 if (get_attr_size (insn) == SIZE_32)
32214 return true;
32215 else
32216 break;
32217 default:
32218 break;
32220 break;
32221 case PROCESSOR_POWER7:
32222 type = get_attr_type (insn);
32224 switch (type)
32226 case TYPE_ISYNC:
32227 case TYPE_SYNC:
32228 case TYPE_LOAD_L:
32229 case TYPE_STORE_C:
32230 return true;
32231 case TYPE_LOAD:
32232 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32233 && get_attr_update (insn) == UPDATE_YES)
32234 return true;
32235 else
32236 break;
32237 case TYPE_STORE:
32238 if (get_attr_update (insn) == UPDATE_YES
32239 && get_attr_indexed (insn) == INDEXED_YES)
32240 return true;
32241 else
32242 break;
32243 default:
32244 break;
32246 break;
32247 case PROCESSOR_POWER8:
32248 type = get_attr_type (insn);
32250 switch (type)
32252 case TYPE_MFCR:
32253 case TYPE_MTCR:
32254 case TYPE_ISYNC:
32255 case TYPE_SYNC:
32256 case TYPE_LOAD_L:
32257 case TYPE_STORE_C:
32258 return true;
32259 case TYPE_LOAD:
32260 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32261 && get_attr_update (insn) == UPDATE_YES)
32262 return true;
32263 else
32264 break;
32265 case TYPE_STORE:
32266 if (get_attr_update (insn) == UPDATE_YES
32267 && get_attr_indexed (insn) == INDEXED_YES)
32268 return true;
32269 else
32270 break;
32271 default:
32272 break;
32274 break;
32275 default:
32276 break;
32279 return false;
32282 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32283 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32285 static bool
32286 is_costly_group (rtx *group_insns, rtx next_insn)
32288 int i;
32289 int issue_rate = rs6000_issue_rate ();
32291 for (i = 0; i < issue_rate; i++)
32293 sd_iterator_def sd_it;
32294 dep_t dep;
32295 rtx insn = group_insns[i];
32297 if (!insn)
32298 continue;
32300 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32302 rtx next = DEP_CON (dep);
32304 if (next == next_insn
32305 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32306 return true;
32310 return false;
32313 /* A utility function for redefine_groups.
32314 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32315 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32316 to keep it "far" (in a separate group) from GROUP_INSNS, following
32317 one of the following schemes, depending on the value of the flag
32318 -minsert-sched-nops = X:
32319 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32320 in order to force NEXT_INSN into a separate group.
32321 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32322 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32323 insertion (has a group just ended, how many vacant issue slots remain in the
32324 last group, and how many dispatch groups were encountered so far). */
32326 static int
32327 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32328 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32329 int *group_count)
32331 rtx nop;
32332 bool force;
32333 int issue_rate = rs6000_issue_rate ();
32334 bool end = *group_end;
32335 int i;
32337 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32338 return can_issue_more;
32340 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32341 return can_issue_more;
32343 force = is_costly_group (group_insns, next_insn);
32344 if (!force)
32345 return can_issue_more;
32347 if (sched_verbose > 6)
32348 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
32349 *group_count, can_issue_more);
32351 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32353 if (*group_end)
32354 can_issue_more = 0;
32356 /* Since only a branch can be issued in the last issue_slot, it is
32357 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32358 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32359 in this case the last nop will start a new group and the branch
32360 will be forced to the new group. */
32361 if (can_issue_more && !is_branch_slot_insn (next_insn))
32362 can_issue_more--;
32364 /* Do we have a special group ending nop? */
32365 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32366 || rs6000_tune == PROCESSOR_POWER8)
32368 nop = gen_group_ending_nop ();
32369 emit_insn_before (nop, next_insn);
32370 can_issue_more = 0;
32372 else
32373 while (can_issue_more > 0)
32375 nop = gen_nop ();
32376 emit_insn_before (nop, next_insn);
32377 can_issue_more--;
32380 *group_end = true;
32381 return 0;
32384 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32386 int n_nops = rs6000_sched_insert_nops;
32388 /* Nops can't be issued from the branch slot, so the effective
32389 issue_rate for nops is 'issue_rate - 1'. */
32390 if (can_issue_more == 0)
32391 can_issue_more = issue_rate;
32392 can_issue_more--;
32393 if (can_issue_more == 0)
32395 can_issue_more = issue_rate - 1;
32396 (*group_count)++;
32397 end = true;
32398 for (i = 0; i < issue_rate; i++)
32400 group_insns[i] = 0;
32404 while (n_nops > 0)
32406 nop = gen_nop ();
32407 emit_insn_before (nop, next_insn);
32408 if (can_issue_more == issue_rate - 1) /* new group begins */
32409 end = false;
32410 can_issue_more--;
32411 if (can_issue_more == 0)
32413 can_issue_more = issue_rate - 1;
32414 (*group_count)++;
32415 end = true;
32416 for (i = 0; i < issue_rate; i++)
32418 group_insns[i] = 0;
32421 n_nops--;
32424 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32425 can_issue_more++;
32427 /* Is next_insn going to start a new group? */
32428 *group_end
32429 = (end
32430 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32431 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32432 || (can_issue_more < issue_rate &&
32433 insn_terminates_group_p (next_insn, previous_group)));
32434 if (*group_end && end)
32435 (*group_count)--;
32437 if (sched_verbose > 6)
32438 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32439 *group_count, can_issue_more);
32440 return can_issue_more;
32443 return can_issue_more;
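/* Editorial sketch, not from the original source: a simplified model of
   the fixed-count nop insertion above.  Nops cannot occupy the branch
   slot, so they fill groups at an effective rate of issue_rate - 1,
   bumping the group count whenever a group fills: */
#if 0
static int
slots_after_nops (int n_nops, int can_issue_more, int issue_rate,
		  int *group_count)
{
  if (can_issue_more == 0)
    can_issue_more = issue_rate;
  can_issue_more--;		/* reserve the branch slot */
  if (can_issue_more == 0)
    {
      can_issue_more = issue_rate - 1;
      (*group_count)++;
    }
  while (n_nops-- > 0)
    {
      can_issue_more--;
      if (can_issue_more == 0)
	{
	  can_issue_more = issue_rate - 1;
	  (*group_count)++;
	}
    }
  return can_issue_more + 1;	/* scale back relative to issue_rate */
}
#endif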
32446 /* This function tries to synch the dispatch groups that the compiler "sees"
32447 with the dispatch groups that the processor dispatcher is expected to
32448 form in practice. It tries to achieve this synchronization by forcing the
32449 estimated processor grouping on the compiler (as opposed to the function
32450 'pad_groups' which tries to force the scheduler's grouping on the processor).
32452 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32453 examines the (estimated) dispatch groups that will be formed by the processor
32454 dispatcher. It marks these group boundaries to reflect the estimated
32455 processor grouping, overriding the grouping that the scheduler had marked.
32456 Depending on the value of the flag '-minsert-sched-nops' this function can
32457 force certain insns into separate groups or force a certain distance between
32458 them by inserting nops, for example, if there exists a "costly dependence"
32459 between the insns.
32461 The function estimates the group boundaries that the processor will form as
32462 follows: It keeps track of how many vacant issue slots are available after
32463 each insn. A subsequent insn will start a new group if one of the following
32464 4 cases applies:
32465 - no more vacant issue slots remain in the current dispatch group.
32466 - only the last issue slot, which is the branch slot, is vacant, but the next
32467 insn is not a branch.
32468 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32469 which means that a cracked insn (which occupies two issue slots) can't be
32470 issued in this group.
32471 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32472 start a new group. */
32474 static int
32475 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32476 rtx_insn *tail)
32478 rtx_insn *insn, *next_insn;
32479 int issue_rate;
32480 int can_issue_more;
32481 int slot, i;
32482 bool group_end;
32483 int group_count = 0;
32484 rtx *group_insns;
32486 /* Initialize. */
32487 issue_rate = rs6000_issue_rate ();
32488 group_insns = XALLOCAVEC (rtx, issue_rate);
32489 for (i = 0; i < issue_rate; i++)
32491 group_insns[i] = 0;
32493 can_issue_more = issue_rate;
32494 slot = 0;
32495 insn = get_next_active_insn (prev_head_insn, tail);
32496 group_end = false;
32498 while (insn != NULL_RTX)
32500 slot = (issue_rate - can_issue_more);
32501 group_insns[slot] = insn;
32502 can_issue_more =
32503 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32504 if (insn_terminates_group_p (insn, current_group))
32505 can_issue_more = 0;
32507 next_insn = get_next_active_insn (insn, tail);
32508 if (next_insn == NULL_RTX)
32509 return group_count + 1;
32511 /* Is next_insn going to start a new group? */
32512 group_end
32513 = (can_issue_more == 0
32514 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32515 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32516 || (can_issue_more < issue_rate &&
32517 insn_terminates_group_p (next_insn, previous_group)));
32519 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32520 next_insn, &group_end, can_issue_more,
32521 &group_count);
32523 if (group_end)
32525 group_count++;
32526 can_issue_more = 0;
32527 for (i = 0; i < issue_rate; i++)
32529 group_insns[i] = 0;
32533 if (GET_MODE (next_insn) == TImode && can_issue_more)
32534 PUT_MODE (next_insn, VOIDmode);
32535 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32536 PUT_MODE (next_insn, TImode);
32538 insn = next_insn;
32539 if (can_issue_more == 0)
32540 can_issue_more = issue_rate;
32541 } /* while */
32543 return group_count;
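/* Editorial sketch, not from the original source: the four
   group-boundary conditions shared by redefine_groups and
   force_new_group, condensed into one predicate (parameter names are
   hypothetical): */
#if 0
static int
starts_new_group_p (int can_issue_more, int issue_rate,
		    int next_is_branch_p, int next_is_cracked_p,
		    int next_must_be_first_p)
{
  return can_issue_more == 0
	 || (can_issue_more == 1 && !next_is_branch_p)
	 || (can_issue_more <= 2 && next_is_cracked_p)
	 || (can_issue_more < issue_rate && next_must_be_first_p);
}
#endif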
32546 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32547 dispatch group boundaries that the scheduler had marked. Pad with nops
32548 any dispatch groups which have vacant issue slots, in order to force the
32549 scheduler's grouping on the processor dispatcher. The function
32550 returns the number of dispatch groups found. */
32552 static int
32553 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32554 rtx_insn *tail)
32556 rtx_insn *insn, *next_insn;
32557 rtx nop;
32558 int issue_rate;
32559 int can_issue_more;
32560 int group_end;
32561 int group_count = 0;
32563 /* Initialize issue_rate. */
32564 issue_rate = rs6000_issue_rate ();
32565 can_issue_more = issue_rate;
32567 insn = get_next_active_insn (prev_head_insn, tail);
32568 next_insn = get_next_active_insn (insn, tail);
32570 while (insn != NULL_RTX)
32572 can_issue_more =
32573 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32575 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32577 if (next_insn == NULL_RTX)
32578 break;
32580 if (group_end)
32582 /* If the scheduler had marked group termination at this location
32583 (between insn and next_insn), and neither insn nor next_insn will
32584 force group termination, pad the group with nops to force group
32585 termination. */
32586 if (can_issue_more
32587 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32588 && !insn_terminates_group_p (insn, current_group)
32589 && !insn_terminates_group_p (next_insn, previous_group))
32591 if (!is_branch_slot_insn (next_insn))
32592 can_issue_more--;
32594 while (can_issue_more)
32596 nop = gen_nop ();
32597 emit_insn_before (nop, next_insn);
32598 can_issue_more--;
32602 can_issue_more = issue_rate;
32603 group_count++;
32606 insn = next_insn;
32607 next_insn = get_next_active_insn (insn, tail);
32610 return group_count;
32613 /* We're beginning a new block. Initialize data structures as necessary. */
32615 static void
32616 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32617 int sched_verbose ATTRIBUTE_UNUSED,
32618 int max_ready ATTRIBUTE_UNUSED)
32620 last_scheduled_insn = NULL;
32621 load_store_pendulum = 0;
32622 divide_cnt = 0;
32623 vec_pairing = 0;
32626 /* The following function is called at the end of scheduling BB.
32627 After reload, it inserts nops to enforce insn group boundaries. */
32629 static void
32630 rs6000_sched_finish (FILE *dump, int sched_verbose)
32632 int n_groups;
32634 if (sched_verbose)
32635 fprintf (dump, "=== Finishing schedule.\n");
32637 if (reload_completed && rs6000_sched_groups)
32639 /* Do not run the sched_finish hook when selective scheduling is enabled. */
32640 if (sel_sched_p ())
32641 return;
32643 if (rs6000_sched_insert_nops == sched_finish_none)
32644 return;
32646 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32647 n_groups = pad_groups (dump, sched_verbose,
32648 current_sched_info->prev_head,
32649 current_sched_info->next_tail);
32650 else
32651 n_groups = redefine_groups (dump, sched_verbose,
32652 current_sched_info->prev_head,
32653 current_sched_info->next_tail);
32655 if (sched_verbose >= 6)
32657 fprintf (dump, "ngroups = %d\n", n_groups);
32658 print_rtl (dump, current_sched_info->prev_head);
32659 fprintf (dump, "Done finish_sched\n");
32664 struct rs6000_sched_context
32666 short cached_can_issue_more;
32667 rtx_insn *last_scheduled_insn;
32668 int load_store_pendulum;
32669 int divide_cnt;
32670 int vec_pairing;
32673 typedef struct rs6000_sched_context rs6000_sched_context_def;
32674 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32676 /* Allocate storage for a new scheduling context. */
32677 static void *
32678 rs6000_alloc_sched_context (void)
32680 return xmalloc (sizeof (rs6000_sched_context_def));
32683 /* If CLEAN_P is true, initialize _SC with clean data;
32684 otherwise, initialize it from the global context. */
32685 static void
32686 rs6000_init_sched_context (void *_sc, bool clean_p)
32688 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32690 if (clean_p)
32692 sc->cached_can_issue_more = 0;
32693 sc->last_scheduled_insn = NULL;
32694 sc->load_store_pendulum = 0;
32695 sc->divide_cnt = 0;
32696 sc->vec_pairing = 0;
32698 else
32700 sc->cached_can_issue_more = cached_can_issue_more;
32701 sc->last_scheduled_insn = last_scheduled_insn;
32702 sc->load_store_pendulum = load_store_pendulum;
32703 sc->divide_cnt = divide_cnt;
32704 sc->vec_pairing = vec_pairing;
32708 /* Sets the global scheduling context to the one pointed to by _SC. */
32709 static void
32710 rs6000_set_sched_context (void *_sc)
32712 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32714 gcc_assert (sc != NULL);
32716 cached_can_issue_more = sc->cached_can_issue_more;
32717 last_scheduled_insn = sc->last_scheduled_insn;
32718 load_store_pendulum = sc->load_store_pendulum;
32719 divide_cnt = sc->divide_cnt;
32720 vec_pairing = sc->vec_pairing;
32723 /* Free _SC. */
32724 static void
32725 rs6000_free_sched_context (void *_sc)
32727 gcc_assert (_sc != NULL);
32729 free (_sc);
32732 static bool
32733 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32735 switch (get_attr_type (insn))
32737 case TYPE_DIV:
32738 case TYPE_SDIV:
32739 case TYPE_DDIV:
32740 case TYPE_VECDIV:
32741 case TYPE_SSQRT:
32742 case TYPE_DSQRT:
32743 return false;
32745 default:
32746 return true;
32750 /* Length in units of the trampoline for entering a nested function. */
32752 int
32753 rs6000_trampoline_size (void)
32755 int ret = 0;
32757 switch (DEFAULT_ABI)
32759 default:
32760 gcc_unreachable ();
32762 case ABI_AIX:
32763 ret = (TARGET_32BIT) ? 12 : 24;
32764 break;
32766 case ABI_ELFv2:
32767 gcc_assert (!TARGET_32BIT);
32768 ret = 32;
32769 break;
32771 case ABI_DARWIN:
32772 case ABI_V4:
32773 ret = (TARGET_32BIT) ? 40 : 48;
32774 break;
32777 return ret;
32780 /* Emit RTL insns to initialize the variable parts of a trampoline.
32781 FNADDR is an RTX for the address of the function's pure code.
32782 CXT is an RTX for the static chain value for the function. */
32784 static void
32785 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32787 int regsize = (TARGET_32BIT) ? 4 : 8;
32788 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32789 rtx ctx_reg = force_reg (Pmode, cxt);
32790 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32792 switch (DEFAULT_ABI)
32794 default:
32795 gcc_unreachable ();
32797 /* Under AIX, just build the 3-word function descriptor. */
32798 case ABI_AIX:
32800 rtx fnmem, fn_reg, toc_reg;
32802 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32803 error ("you cannot take the address of a nested function if you use "
32804 "the %qs option", "-mno-pointers-to-nested-functions");
32806 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32807 fn_reg = gen_reg_rtx (Pmode);
32808 toc_reg = gen_reg_rtx (Pmode);
32810 /* Macro to shorten the code expansions below. */
32811 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32813 m_tramp = replace_equiv_address (m_tramp, addr);
32815 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32816 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32817 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32818 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32819 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32821 # undef MEM_PLUS
32823 break;
32825 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32826 case ABI_ELFv2:
32827 case ABI_DARWIN:
32828 case ABI_V4:
32829 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32830 LCT_NORMAL, VOIDmode,
32831 addr, Pmode,
32832 GEN_INT (rs6000_trampoline_size ()), SImode,
32833 fnaddr, Pmode,
32834 ctx_reg, Pmode);
32835 break;
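/* Illustrative sketch (editorial, not part of the original source):
   trampolines are only needed when the address of a GNU C nested
   function escapes, e.g.

       int outer (int x)
       {
         int inner (int y) { return x + y; }   /. uses outer's frame ./
         int (*fp) (int) = inner;              /. forces a trampoline ./
         return fp (1);
       }

   For V.4/eabi/Darwin the runtime helper __trampoline_setup (called
   above) fills in the on-stack code; under AIX a 3-word function
   descriptor is built instead.  */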
32840 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32841 identifier as an argument, so the front end shouldn't look it up. */
32843 static bool
32844 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32846 return is_attribute_p ("altivec", attr_id);
32849 /* Handle the "altivec" attribute. The attribute may have
32850 arguments as follows:
32852 __attribute__((altivec(vector__)))
32853 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32854 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32856 and may appear more than once (e.g., 'vector bool char') in a
32857 given declaration. */
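/* Illustrative sketch (editorial, not part of the original source): the
   context-sensitive AltiVec keywords from altivec.h expand, roughly, to
   these internal attribute forms:

       vector unsigned int v;
         ... roughly __attribute__((altivec(vector__))) unsigned int v;
         ... selects unsigned_V4SI_type_node (4 x unsigned int, 128 bits)

       vector bool short b;
         ... 'bool__' applied to unsigned short selects bool_V8HI_type_node  */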
32859 static tree
32860 rs6000_handle_altivec_attribute (tree *node,
32861 tree name ATTRIBUTE_UNUSED,
32862 tree args,
32863 int flags ATTRIBUTE_UNUSED,
32864 bool *no_add_attrs)
32866 tree type = *node, result = NULL_TREE;
32867 machine_mode mode;
32868 int unsigned_p;
32869 char altivec_type
32870 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32871 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32872 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32873 : '?');
32875 while (POINTER_TYPE_P (type)
32876 || TREE_CODE (type) == FUNCTION_TYPE
32877 || TREE_CODE (type) == METHOD_TYPE
32878 || TREE_CODE (type) == ARRAY_TYPE)
32879 type = TREE_TYPE (type);
32881 mode = TYPE_MODE (type);
32883 /* Check for invalid AltiVec type qualifiers. */
32884 if (type == long_double_type_node)
32885 error ("use of %<long double%> in AltiVec types is invalid");
32886 else if (type == boolean_type_node)
32887 error ("use of boolean types in AltiVec types is invalid");
32888 else if (TREE_CODE (type) == COMPLEX_TYPE)
32889 error ("use of %<complex%> in AltiVec types is invalid");
32890 else if (DECIMAL_FLOAT_MODE_P (mode))
32891 error ("use of decimal floating point types in AltiVec types is invalid");
32892 else if (!TARGET_VSX)
32894 if (type == long_unsigned_type_node || type == long_integer_type_node)
32896 if (TARGET_64BIT)
32897 error ("use of %<long%> in AltiVec types is invalid for "
32898 "64-bit code without %qs", "-mvsx");
32899 else if (rs6000_warn_altivec_long)
32900 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32901 "use %<int%>");
32903 else if (type == long_long_unsigned_type_node
32904 || type == long_long_integer_type_node)
32905 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32906 "-mvsx");
32907 else if (type == double_type_node)
32908 error ("use of %<double%> in AltiVec types is invalid without %qs",
32909 "-mvsx");
32912 switch (altivec_type)
32914 case 'v':
32915 unsigned_p = TYPE_UNSIGNED (type);
32916 switch (mode)
32918 case E_TImode:
32919 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32920 break;
32921 case E_DImode:
32922 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32923 break;
32924 case E_SImode:
32925 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32926 break;
32927 case E_HImode:
32928 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32929 break;
32930 case E_QImode:
32931 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32932 break;
32933 case E_SFmode: result = V4SF_type_node; break;
32934 case E_DFmode: result = V2DF_type_node; break;
32935 /* If the user says 'vector int bool', we may be handed the 'bool'
32936 attribute _before_ the 'vector' attribute, and so select the
32937 proper type in the 'b' case below. */
32938 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32939 case E_V2DImode: case E_V2DFmode:
32940 result = type;
32941 default: break;
32943 break;
32944 case 'b':
32945 switch (mode)
32947 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32948 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32949 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32950 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32951 default: break;
32953 break;
32954 case 'p':
32955 switch (mode)
32957 case E_V8HImode: result = pixel_V8HI_type_node;
32958 default: break;
32960 default: break;
32963 /* Propagate qualifiers attached to the element type
32964 onto the vector type. */
32965 if (result && result != type && TYPE_QUALS (type))
32966 result = build_qualified_type (result, TYPE_QUALS (type));
32968 *no_add_attrs = true; /* No need to hang on to the attribute. */
32970 if (result)
32971 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32973 return NULL_TREE;
32976 /* AltiVec defines four built-in scalar types that serve as vector
32977 elements; we must teach the compiler how to mangle them. */
32979 static const char *
32980 rs6000_mangle_type (const_tree type)
32982 type = TYPE_MAIN_VARIANT (type);
32984 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32985 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32986 return NULL;
32988 if (type == bool_char_type_node) return "U6__boolc";
32989 if (type == bool_short_type_node) return "U6__bools";
32990 if (type == pixel_type_node) return "u7__pixel";
32991 if (type == bool_int_type_node) return "U6__booli";
32992 if (type == bool_long_type_node) return "U6__booll";
32994 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32995 "g" for IBM extended double, no matter whether it is long double (using
32996 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32997 if (TARGET_FLOAT128_TYPE)
32999 if (type == ieee128_float_type_node)
33000 return "U10__float128";
33002 if (TARGET_LONG_DOUBLE_128)
33004 if (type == long_double_type_node)
33005 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
33007 if (type == ibm128_float_type_node)
33008 return "g";
33012 /* Mangle IBM extended float long double as `g' (__float128) on
33013 powerpc*-linux where long-double-64 previously was the default. */
33014 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
33015 && TARGET_ELF
33016 && TARGET_LONG_DOUBLE_128
33017 && !TARGET_IEEEQUAD)
33018 return "g";
33020 /* For all other types, use normal C++ mangling. */
33021 return NULL;
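/* Illustrative sketch (editorial, not part of the original source): with
   the rules above, a C++ declaration such as

       void f (__pixel);

   contributes the string "u7__pixel" for the parameter in the Itanium
   mangling of f, and on -mabi=ibmlongdouble targets a long double
   parameter mangles as "g" (IBM extended double).  */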
33024 /* Handle a "longcall" or "shortcall" attribute; arguments as in
33025 struct attribute_spec.handler. */
33027 static tree
33028 rs6000_handle_longcall_attribute (tree *node, tree name,
33029 tree args ATTRIBUTE_UNUSED,
33030 int flags ATTRIBUTE_UNUSED,
33031 bool *no_add_attrs)
33033 if (TREE_CODE (*node) != FUNCTION_TYPE
33034 && TREE_CODE (*node) != FIELD_DECL
33035 && TREE_CODE (*node) != TYPE_DECL)
33037 warning (OPT_Wattributes, "%qE attribute only applies to functions",
33038 name);
33039 *no_add_attrs = true;
33042 return NULL_TREE;
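/* Illustrative sketch (editorial, not part of the original source):
   typical use of the attribute:

       void far_away (void) __attribute__ ((longcall));

   Calls to far_away are then made through a register (see
   rs6000_longcall_ref below) instead of a direct `bl', so they can reach
   targets outside the +/-32MB branch displacement range.  */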
33045 /* Set longcall attributes on all functions declared when
33046 rs6000_default_long_calls is true. */
33047 static void
33048 rs6000_set_default_type_attributes (tree type)
33050 if (rs6000_default_long_calls
33051 && (TREE_CODE (type) == FUNCTION_TYPE
33052 || TREE_CODE (type) == METHOD_TYPE))
33053 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
33054 NULL_TREE,
33055 TYPE_ATTRIBUTES (type));
33057 #if TARGET_MACHO
33058 darwin_set_default_type_attributes (type);
33059 #endif
33062 /* Return a reference suitable for calling a function with the
33063 longcall attribute. */
33065 rtx
33066 rs6000_longcall_ref (rtx call_ref)
33068 const char *call_name;
33069 tree node;
33071 if (GET_CODE (call_ref) != SYMBOL_REF)
33072 return call_ref;
33074 /* System V adds '.' to the internal name, so skip any leading dots. */
33075 call_name = XSTR (call_ref, 0);
33076 if (*call_name == '.')
33078 while (*call_name == '.')
33079 call_name++;
33081 node = get_identifier (call_name);
33082 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
33085 return force_reg (Pmode, call_ref);
33088 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
33089 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
33090 #endif
33092 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
33093 struct attribute_spec.handler. */
33094 static tree
33095 rs6000_handle_struct_attribute (tree *node, tree name,
33096 tree args ATTRIBUTE_UNUSED,
33097 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
33099 tree *type = NULL;
33100 if (DECL_P (*node))
33102 if (TREE_CODE (*node) == TYPE_DECL)
33103 type = &TREE_TYPE (*node);
33105 else
33106 type = node;
33108 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
33109 || TREE_CODE (*type) == UNION_TYPE)))
33111 warning (OPT_Wattributes, "%qE attribute ignored", name);
33112 *no_add_attrs = true;
33115 else if ((is_attribute_p ("ms_struct", name)
33116 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
33117 || ((is_attribute_p ("gcc_struct", name)
33118 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
33120 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
33121 name);
33122 *no_add_attrs = true;
33125 return NULL_TREE;
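/* Illustrative sketch (editorial, not part of the original source): the
   attributes select a record layout, and conflicting requests are
   diagnosed by the handler above:

       struct a { char c; int i; } __attribute__ ((ms_struct));
       struct b { char c; int i; }
         __attribute__ ((ms_struct, gcc_struct));   /. warns: incompatible ./  */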
33128 static bool
33129 rs6000_ms_bitfield_layout_p (const_tree record_type)
33131 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
33132 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
33133 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
33136 #ifdef USING_ELFOS_H
33138 /* A get_unnamed_section callback, used for switching to toc_section. */
33140 static void
33141 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33143 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33144 && TARGET_MINIMAL_TOC)
33146 if (!toc_initialized)
33148 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33149 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33150 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33151 fprintf (asm_out_file, "\t.tc ");
33152 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33153 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33154 fprintf (asm_out_file, "\n");
33156 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33157 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33158 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33159 fprintf (asm_out_file, " = .+32768\n");
33160 toc_initialized = 1;
33162 else
33163 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33165 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33167 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33168 if (!toc_initialized)
33170 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33171 toc_initialized = 1;
33174 else
33176 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33177 if (!toc_initialized)
33179 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33180 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33181 fprintf (asm_out_file, " = .+32768\n");
33182 toc_initialized = 1;
33187 /* Implement TARGET_ASM_INIT_SECTIONS. */
33189 static void
33190 rs6000_elf_asm_init_sections (void)
33192 toc_section
33193 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33195 sdata2_section
33196 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33197 SDATA2_SECTION_ASM_OP);
33200 /* Implement TARGET_SELECT_RTX_SECTION. */
33202 static section *
33203 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33204 unsigned HOST_WIDE_INT align)
33206 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33207 return toc_section;
33208 else
33209 return default_elf_select_rtx_section (mode, x, align);
33212 /* For a SYMBOL_REF, set generic flags and then perform some
33213 target-specific processing.
33215 When the AIX ABI is requested on a non-AIX system, replace the
33216 function name with the real name (with a leading .) rather than the
33217 function descriptor name. This saves a lot of overriding code to
33218 read the prefixes. */
33220 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33221 static void
33222 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33224 default_encode_section_info (decl, rtl, first);
33226 if (first
33227 && TREE_CODE (decl) == FUNCTION_DECL
33228 && !TARGET_AIX
33229 && DEFAULT_ABI == ABI_AIX)
33231 rtx sym_ref = XEXP (rtl, 0);
33232 size_t len = strlen (XSTR (sym_ref, 0));
33233 char *str = XALLOCAVEC (char, len + 2);
33234 str[0] = '.';
33235 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33236 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33240 static inline bool
33241 compare_section_name (const char *section, const char *templ)
33243 int len;
33245 len = strlen (templ);
33246 return (strncmp (section, templ, len) == 0
33247 && (section[len] == 0 || section[len] == '.'));
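/* Illustrative sketch (editorial, not part of the original source):
   compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are both true, but
   compare_section_name (".sdatafoo", ".sdata") is false, since the match
   must end at a NUL or a '.'.  */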
33250 bool
33251 rs6000_elf_in_small_data_p (const_tree decl)
33253 if (rs6000_sdata == SDATA_NONE)
33254 return false;
33256 /* We want to merge strings, so we never consider them small data. */
33257 if (TREE_CODE (decl) == STRING_CST)
33258 return false;
33260 /* Functions are never in the small data area. */
33261 if (TREE_CODE (decl) == FUNCTION_DECL)
33262 return false;
33264 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33266 const char *section = DECL_SECTION_NAME (decl);
33267 if (compare_section_name (section, ".sdata")
33268 || compare_section_name (section, ".sdata2")
33269 || compare_section_name (section, ".gnu.linkonce.s")
33270 || compare_section_name (section, ".sbss")
33271 || compare_section_name (section, ".sbss2")
33272 || compare_section_name (section, ".gnu.linkonce.sb")
33273 || strcmp (section, ".PPC.EMB.sdata0") == 0
33274 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33275 return true;
33277 else
33279 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33281 if (size > 0
33282 && size <= g_switch_value
33283 /* If it's not public, and we're not going to reference it there,
33284 there's no need to put it in the small data section. */
33285 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33286 return true;
33289 return false;
33292 #endif /* USING_ELFOS_H */
33294 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33296 static bool
33297 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33299 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33302 /* Do not place thread-local symbol refs in the object blocks. */
33304 static bool
33305 rs6000_use_blocks_for_decl_p (const_tree decl)
33307 return !DECL_THREAD_LOCAL_P (decl);
33310 /* Return a REG that occurs in ADDR with coefficient 1.
33311 ADDR can be effectively incremented by incrementing REG.
33313 r0 is special and we must not select it as an address
33314 register by this routine since our caller will try to
33315 increment the returned register via an "la" instruction. */
33317 rtx
33318 find_addr_reg (rtx addr)
33320 while (GET_CODE (addr) == PLUS)
33322 if (GET_CODE (XEXP (addr, 0)) == REG
33323 && REGNO (XEXP (addr, 0)) != 0)
33324 addr = XEXP (addr, 0);
33325 else if (GET_CODE (XEXP (addr, 1)) == REG
33326 && REGNO (XEXP (addr, 1)) != 0)
33327 addr = XEXP (addr, 1);
33328 else if (CONSTANT_P (XEXP (addr, 0)))
33329 addr = XEXP (addr, 1);
33330 else if (CONSTANT_P (XEXP (addr, 1)))
33331 addr = XEXP (addr, 0);
33332 else
33333 gcc_unreachable ();
33335 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33336 return addr;
33339 void
33340 rs6000_fatal_bad_address (rtx op)
33342 fatal_insn ("bad address", op);
33345 #if TARGET_MACHO
33347 typedef struct branch_island_d {
33348 tree function_name;
33349 tree label_name;
33350 int line_number;
33351 } branch_island;
33354 static vec<branch_island, va_gc> *branch_islands;
33356 /* Remember to generate a branch island for far calls to the given
33357 function. */
33359 static void
33360 add_compiler_branch_island (tree label_name, tree function_name,
33361 int line_number)
33363 branch_island bi = {function_name, label_name, line_number};
33364 vec_safe_push (branch_islands, bi);
33367 /* Generate far-jump branch islands for everything recorded in
33368 branch_islands. Invoked immediately after the last instruction of
33369 the epilogue has been emitted; the branch islands must be appended
33370 to, and contiguous with, the function body. Mach-O stubs are
33371 generated in machopic_output_stub(). */
33373 static void
33374 macho_branch_islands (void)
33376 char tmp_buf[512];
33378 while (!vec_safe_is_empty (branch_islands))
33380 branch_island *bi = &branch_islands->last ();
33381 const char *label = IDENTIFIER_POINTER (bi->label_name);
33382 const char *name = IDENTIFIER_POINTER (bi->function_name);
33383 char name_buf[512];
33384 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33385 if (name[0] == '*' || name[0] == '&')
33386 strcpy (name_buf, name+1);
33387 else
33389 name_buf[0] = '_';
33390 strcpy (name_buf+1, name);
33392 strcpy (tmp_buf, "\n");
33393 strcat (tmp_buf, label);
33394 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33395 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33396 dbxout_stabd (N_SLINE, bi->line_number);
33397 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33398 if (flag_pic)
33400 if (TARGET_LINK_STACK)
33402 char name[32];
33403 get_ppc476_thunk_name (name);
33404 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33405 strcat (tmp_buf, name);
33406 strcat (tmp_buf, "\n");
33407 strcat (tmp_buf, label);
33408 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33410 else
33412 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33413 strcat (tmp_buf, label);
33414 strcat (tmp_buf, "_pic\n");
33415 strcat (tmp_buf, label);
33416 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33419 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33420 strcat (tmp_buf, name_buf);
33421 strcat (tmp_buf, " - ");
33422 strcat (tmp_buf, label);
33423 strcat (tmp_buf, "_pic)\n");
33425 strcat (tmp_buf, "\tmtlr r0\n");
33427 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33428 strcat (tmp_buf, name_buf);
33429 strcat (tmp_buf, " - ");
33430 strcat (tmp_buf, label);
33431 strcat (tmp_buf, "_pic)\n");
33433 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33435 else
33437 strcat (tmp_buf, ":\nlis r12,hi16(");
33438 strcat (tmp_buf, name_buf);
33439 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33440 strcat (tmp_buf, name_buf);
33441 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33443 output_asm_insn (tmp_buf, 0);
33444 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33445 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33446 dbxout_stabd (N_SLINE, bi->line_number);
33447 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33448 branch_islands->pop ();
33452 /* NO_PREVIOUS_DEF checks the list of branch islands to see whether
33453 the function name is already present. */
33455 static int
33456 no_previous_def (tree function_name)
33458 branch_island *bi;
33459 unsigned ix;
33461 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33462 if (function_name == bi->function_name)
33463 return 0;
33464 return 1;
33467 /* GET_PREV_LABEL gets the label name from the previous definition of
33468 the function. */
33470 static tree
33471 get_prev_label (tree function_name)
33473 branch_island *bi;
33474 unsigned ix;
33476 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33477 if (function_name == bi->function_name)
33478 return bi->label_name;
33479 return NULL_TREE;
33482 /* INSN is either a function call or a millicode call. It may have an
33483 unconditional jump in its delay slot.
33485 CALL_DEST is the routine we are calling. */
33487 char *
33488 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33489 int cookie_operand_number)
33491 static char buf[256];
33492 if (darwin_emit_branch_islands
33493 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33494 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33496 tree labelname;
33497 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33499 if (no_previous_def (funname))
33501 rtx label_rtx = gen_label_rtx ();
33502 char *label_buf, temp_buf[256];
33503 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33504 CODE_LABEL_NUMBER (label_rtx));
33505 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33506 labelname = get_identifier (label_buf);
33507 add_compiler_branch_island (labelname, funname, insn_line (insn));
33509 else
33510 labelname = get_prev_label (funname);
33512 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33513 instruction will reach 'foo', otherwise link as 'bl L42'".
33514 "L42" should be a 'branch island', that will do a far jump to
33515 'foo'. Branch islands are generated in
33516 macho_branch_islands(). */
33517 sprintf (buf, "jbsr %%z%d,%.246s",
33518 dest_operand_number, IDENTIFIER_POINTER (labelname));
33520 else
33521 sprintf (buf, "bl %%z%d", dest_operand_number);
33522 return buf;
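/* Illustrative sketch (editorial, not part of the original source): for a
   long call to foo on Darwin, the pattern above prints something like

       jbsr _foo,L42

   where L42 labels the branch island emitted later by
   macho_branch_islands (); a short call degrades to plain "bl _foo".  */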
33525 /* Generate PIC and indirect symbol stubs. */
33527 void
33528 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33530 unsigned int length;
33531 char *symbol_name, *lazy_ptr_name;
33532 char *local_label_0;
33533 static int label = 0;
33535 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33536 symb = (*targetm.strip_name_encoding) (symb);
33539 length = strlen (symb);
33540 symbol_name = XALLOCAVEC (char, length + 32);
33541 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33543 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33544 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33546 if (flag_pic == 2)
33547 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33548 else
33549 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33551 if (flag_pic == 2)
33553 fprintf (file, "\t.align 5\n");
33555 fprintf (file, "%s:\n", stub);
33556 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33558 label++;
33559 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33560 sprintf (local_label_0, "\"L%011d$spb\"", label);
33562 fprintf (file, "\tmflr r0\n");
33563 if (TARGET_LINK_STACK)
33565 char name[32];
33566 get_ppc476_thunk_name (name);
33567 fprintf (file, "\tbl %s\n", name);
33568 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33570 else
33572 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33573 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33575 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33576 lazy_ptr_name, local_label_0);
33577 fprintf (file, "\tmtlr r0\n");
33578 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33579 (TARGET_64BIT ? "ldu" : "lwzu"),
33580 lazy_ptr_name, local_label_0);
33581 fprintf (file, "\tmtctr r12\n");
33582 fprintf (file, "\tbctr\n");
33584 else
33586 fprintf (file, "\t.align 4\n");
33588 fprintf (file, "%s:\n", stub);
33589 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33591 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33592 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33593 (TARGET_64BIT ? "ldu" : "lwzu"),
33594 lazy_ptr_name);
33595 fprintf (file, "\tmtctr r12\n");
33596 fprintf (file, "\tbctr\n");
33599 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33600 fprintf (file, "%s:\n", lazy_ptr_name);
33601 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33602 fprintf (file, "%sdyld_stub_binding_helper\n",
33603 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
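/* Illustrative sketch (editorial, not part of the original source): in
   the non-PIC case (flag_pic != 2) the stub emitted above looks roughly
   like

       _foo$stub:
               .indirect_symbol _foo
               lis r11,ha16(_foo$lazy_ptr)
               lwzu r12,lo16(_foo$lazy_ptr)(r11)
               mtctr r12
               bctr

   with the lazy pointer initially resolving to
   dyld_stub_binding_helper.  */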
33606 /* Legitimize PIC addresses. If the address is already
33607 position-independent, we return ORIG. Newly generated
33608 position-independent addresses go into a reg. This is REG if
33609 nonzero; otherwise we allocate register(s) as necessary. */
33611 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
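/* Editorial note: SMALL_INT (X) is true iff X fits in a signed 16-bit
   immediate; (UINTVAL (X) + 0x8000) < 0x10000 holds exactly for
   -0x8000 <= X <= 0x7fff, so 0x7fff passes and 0x8000 does not.  */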
33613 rtx
33614 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33615 rtx reg)
33617 rtx base, offset;
33619 if (reg == NULL && !reload_completed)
33620 reg = gen_reg_rtx (Pmode);
33622 if (GET_CODE (orig) == CONST)
33624 rtx reg_temp;
33626 if (GET_CODE (XEXP (orig, 0)) == PLUS
33627 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33628 return orig;
33630 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33632 /* Use a different reg for the intermediate value, as
33633 it will be marked UNCHANGING. */
33634 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33635 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33636 Pmode, reg_temp);
33637 offset =
33638 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33639 Pmode, reg);
33641 if (GET_CODE (offset) == CONST_INT)
33643 if (SMALL_INT (offset))
33644 return plus_constant (Pmode, base, INTVAL (offset));
33645 else if (!reload_completed)
33646 offset = force_reg (Pmode, offset);
33647 else
33649 rtx mem = force_const_mem (Pmode, orig);
33650 return machopic_legitimize_pic_address (mem, Pmode, reg);
33653 return gen_rtx_PLUS (Pmode, base, offset);
33656 /* Fall back on generic machopic code. */
33657 return machopic_legitimize_pic_address (orig, mode, reg);
33660 /* Output a .machine directive for the Darwin assembler, and call
33661 the generic start_file routine. */
33663 static void
33664 rs6000_darwin_file_start (void)
33666 static const struct
33668 const char *arg;
33669 const char *name;
33670 HOST_WIDE_INT if_set;
33671 } mapping[] = {
33672 { "ppc64", "ppc64", MASK_64BIT },
33673 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33674 { "power4", "ppc970", 0 },
33675 { "G5", "ppc970", 0 },
33676 { "7450", "ppc7450", 0 },
33677 { "7400", "ppc7400", MASK_ALTIVEC },
33678 { "G4", "ppc7400", 0 },
33679 { "750", "ppc750", 0 },
33680 { "740", "ppc750", 0 },
33681 { "G3", "ppc750", 0 },
33682 { "604e", "ppc604e", 0 },
33683 { "604", "ppc604", 0 },
33684 { "603e", "ppc603", 0 },
33685 { "603", "ppc603", 0 },
33686 { "601", "ppc601", 0 },
33687 { NULL, "ppc", 0 } };
33688 const char *cpu_id = "";
33689 size_t i;
33691 rs6000_file_start ();
33692 darwin_file_start ();
33694 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33696 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33697 cpu_id = rs6000_default_cpu;
33699 if (global_options_set.x_rs6000_cpu_index)
33700 cpu_id = processor_target_table[rs6000_cpu_index].name;
33702 /* Look through the mapping array. Pick the first name that either
33703 matches the argument, has a bit set in IF_SET that is also set
33704 in the target flags, or has a NULL name. */
33706 i = 0;
33707 while (mapping[i].arg != NULL
33708 && strcmp (mapping[i].arg, cpu_id) != 0
33709 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33710 i++;
33712 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
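/* Illustrative sketch (editorial, not part of the original source):
   compiling with -mcpu=G4 matches the "G4" row above and emits
   ".machine ppc7400"; a 64-bit compile with no -mcpu hits the MASK_64BIT
   row first and emits ".machine ppc64"; with nothing set, the NULL
   sentinel yields plain ".machine ppc".  */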
33715 #endif /* TARGET_MACHO */
33717 #if TARGET_ELF
33718 static int
33719 rs6000_elf_reloc_rw_mask (void)
33721 if (flag_pic)
33722 return 3;
33723 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33724 return 2;
33725 else
33726 return 0;
33729 /* Record an element in the table of global constructors. SYMBOL is
33730 a SYMBOL_REF of the function to be called; PRIORITY is a number
33731 between 0 and MAX_INIT_PRIORITY.
33733 This differs from default_named_section_asm_out_constructor in
33734 that we have special handling for -mrelocatable. */
33736 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33737 static void
33738 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33740 const char *section = ".ctors";
33741 char buf[18];
33743 if (priority != DEFAULT_INIT_PRIORITY)
33745 sprintf (buf, ".ctors.%.5u",
33746 /* Invert the numbering so the linker puts us in the proper
33747 order; constructors are run from right to left, and the
33748 linker sorts in increasing order. */
33749 MAX_INIT_PRIORITY - priority);
33750 section = buf;
33753 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33754 assemble_align (POINTER_SIZE);
33756 if (DEFAULT_ABI == ABI_V4
33757 && (TARGET_RELOCATABLE || flag_pic > 1))
33759 fputs ("\t.long (", asm_out_file);
33760 output_addr_const (asm_out_file, symbol);
33761 fputs (")@fixup\n", asm_out_file);
33763 else
33764 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
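/* Illustrative sketch (editorial, not part of the original source): with
   MAX_INIT_PRIORITY of 65535, a constructor of priority 200 lands in
   section ".ctors.65335" (65535 - 200), so the linker's ascending sort
   places higher-priority constructors where they will run first.  */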
33767 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33768 static void
33769 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33771 const char *section = ".dtors";
33772 char buf[18];
33774 if (priority != DEFAULT_INIT_PRIORITY)
33776 sprintf (buf, ".dtors.%.5u",
33777 /* Invert the numbering so the linker puts us in the proper
33778 order; constructors are run from right to left, and the
33779 linker sorts in increasing order. */
33780 MAX_INIT_PRIORITY - priority);
33781 section = buf;
33784 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33785 assemble_align (POINTER_SIZE);
33787 if (DEFAULT_ABI == ABI_V4
33788 && (TARGET_RELOCATABLE || flag_pic > 1))
33790 fputs ("\t.long (", asm_out_file);
33791 output_addr_const (asm_out_file, symbol);
33792 fputs (")@fixup\n", asm_out_file);
33794 else
33795 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33798 void
33799 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33801 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33803 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33804 ASM_OUTPUT_LABEL (file, name);
33805 fputs (DOUBLE_INT_ASM_OP, file);
33806 rs6000_output_function_entry (file, name);
33807 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33808 if (DOT_SYMBOLS)
33810 fputs ("\t.size\t", file);
33811 assemble_name (file, name);
33812 fputs (",24\n\t.type\t.", file);
33813 assemble_name (file, name);
33814 fputs (",@function\n", file);
33815 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33817 fputs ("\t.globl\t.", file);
33818 assemble_name (file, name);
33819 putc ('\n', file);
33822 else
33823 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33824 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33825 rs6000_output_function_entry (file, name);
33826 fputs (":\n", file);
33827 return;
33830 int uses_toc;
33831 if (DEFAULT_ABI == ABI_V4
33832 && (TARGET_RELOCATABLE || flag_pic > 1)
33833 && !TARGET_SECURE_PLT
33834 && (!constant_pool_empty_p () || crtl->profile)
33835 && (uses_toc = uses_TOC ()))
33837 char buf[256];
33839 if (uses_toc == 2)
33840 switch_to_other_text_partition ();
33841 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33843 fprintf (file, "\t.long ");
33844 assemble_name (file, toc_label_name);
33845 need_toc_init = 1;
33846 putc ('-', file);
33847 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33848 assemble_name (file, buf);
33849 putc ('\n', file);
33850 if (uses_toc == 2)
33851 switch_to_other_text_partition ();
33854 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33855 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33857 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33859 char buf[256];
33861 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33863 fprintf (file, "\t.quad .TOC.-");
33864 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33865 assemble_name (file, buf);
33866 putc ('\n', file);
33869 if (DEFAULT_ABI == ABI_AIX)
33871 const char *desc_name, *orig_name;
33873 orig_name = (*targetm.strip_name_encoding) (name);
33874 desc_name = orig_name;
33875 while (*desc_name == '.')
33876 desc_name++;
33878 if (TREE_PUBLIC (decl))
33879 fprintf (file, "\t.globl %s\n", desc_name);
33881 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33882 fprintf (file, "%s:\n", desc_name);
33883 fprintf (file, "\t.long %s\n", orig_name);
33884 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33885 fputs ("\t.long 0\n", file);
33886 fprintf (file, "\t.previous\n");
33888 ASM_OUTPUT_LABEL (file, name);
33891 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33892 static void
33893 rs6000_elf_file_end (void)
33895 #ifdef HAVE_AS_GNU_ATTRIBUTE
33896 /* ??? The value emitted depends on options active at file end.
33897 Assume anyone using #pragma or attributes that might change
33898 options knows what they are doing. */
33899 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33900 && rs6000_passes_float)
33902 int fp;
33904 if (TARGET_DF_FPR)
33905 fp = 1;
33906 else if (TARGET_SF_FPR)
33907 fp = 3;
33908 else
33909 fp = 2;
33910 if (rs6000_passes_long_double)
33912 if (!TARGET_LONG_DOUBLE_128)
33913 fp |= 2 * 4;
33914 else if (TARGET_IEEEQUAD)
33915 fp |= 3 * 4;
33916 else
33917 fp |= 1 * 4;
33919 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
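/* Illustrative sketch (editorial, not part of the original source): with
   hard double-precision FPRs (fp = 1) and 128-bit IBM long double
   (fp |= 1 * 4), this emits ".gnu_attribute 4, 5"; IEEE quad long double
   would instead give ".gnu_attribute 4, 13" (1 | 3 * 4).  */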
33921 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33923 if (rs6000_passes_vector)
33924 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33925 (TARGET_ALTIVEC_ABI ? 2 : 1));
33926 if (rs6000_returns_struct)
33927 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33928 aix_struct_return ? 2 : 1);
33930 #endif
33931 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33932 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33933 file_end_indicate_exec_stack ();
33934 #endif
33936 if (flag_split_stack)
33937 file_end_indicate_split_stack ();
33939 if (cpu_builtin_p)
33941 /* We have expanded a CPU builtin, so we need to emit a reference to
33942 the special symbol that LIBC uses to declare it supports the
33943 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33944 switch_to_section (data_section);
33945 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33946 fprintf (asm_out_file, "\t%s %s\n",
33947 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33950 #endif
33952 #if TARGET_XCOFF
33954 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33955 #define HAVE_XCOFF_DWARF_EXTRAS 0
33956 #endif
33958 static enum unwind_info_type
33959 rs6000_xcoff_debug_unwind_info (void)
33961 return UI_NONE;
33964 static void
33965 rs6000_xcoff_asm_output_anchor (rtx symbol)
33967 char buffer[100];
33969 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33970 SYMBOL_REF_BLOCK_OFFSET (symbol));
33971 fprintf (asm_out_file, "%s", SET_ASM_OP);
33972 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33973 fprintf (asm_out_file, ",");
33974 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33975 fprintf (asm_out_file, "\n");
33978 static void
33979 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33981 fputs (GLOBAL_ASM_OP, stream);
33982 RS6000_OUTPUT_BASENAME (stream, name);
33983 putc ('\n', stream);
33986 /* A get_unnamed_section callback, used for read-only sections.
33987 DIRECTIVE points to the section string variable. */
33989 static void
33990 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33992 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33993 *(const char *const *) directive,
33994 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33997 /* Likewise for read-write sections. */
33999 static void
34000 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
34002 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
34003 *(const char *const *) directive,
34004 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
34007 static void
34008 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
34010 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
34011 *(const char *const *) directive,
34012 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
34015 /* A get_unnamed_section callback, used for switching to toc_section. */
34017 static void
34018 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
34020 if (TARGET_MINIMAL_TOC)
34022 /* toc_section is always selected at least once from
34023 rs6000_xcoff_file_start, so this is guaranteed to be
34024 defined exactly once in each file. */
34025 if (!toc_initialized)
34027 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
34028 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
34029 toc_initialized = 1;
34031 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
34032 (TARGET_32BIT ? "" : ",3"));
34034 else
34035 fputs ("\t.toc\n", asm_out_file);
34038 /* Implement TARGET_ASM_INIT_SECTIONS. */
34040 static void
34041 rs6000_xcoff_asm_init_sections (void)
34043 read_only_data_section
34044 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
34045 &xcoff_read_only_section_name);
34047 private_data_section
34048 = get_unnamed_section (SECTION_WRITE,
34049 rs6000_xcoff_output_readwrite_section_asm_op,
34050 &xcoff_private_data_section_name);
34052 tls_data_section
34053 = get_unnamed_section (SECTION_TLS,
34054 rs6000_xcoff_output_tls_section_asm_op,
34055 &xcoff_tls_data_section_name);
34057 tls_private_data_section
34058 = get_unnamed_section (SECTION_TLS,
34059 rs6000_xcoff_output_tls_section_asm_op,
34060 &xcoff_private_data_section_name);
34062 read_only_private_data_section
34063 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
34064 &xcoff_private_data_section_name);
34066 toc_section
34067 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
34069 readonly_data_section = read_only_data_section;
34072 static int
34073 rs6000_xcoff_reloc_rw_mask (void)
34075 return 3;
34078 static void
34079 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
34080 tree decl ATTRIBUTE_UNUSED)
34082 int smclass;
34083 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
34085 if (flags & SECTION_EXCLUDE)
34086 smclass = 4;
34087 else if (flags & SECTION_DEBUG)
34089 fprintf (asm_out_file, "\t.dwsect %s\n", name);
34090 return;
34092 else if (flags & SECTION_CODE)
34093 smclass = 0;
34094 else if (flags & SECTION_TLS)
34095 smclass = 3;
34096 else if (flags & SECTION_WRITE)
34097 smclass = 2;
34098 else
34099 smclass = 1;
34101 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
34102 (flags & SECTION_CODE) ? "." : "",
34103 name, suffix[smclass], flags & SECTION_ENTSIZE);
34106 #define IN_NAMED_SECTION(DECL) \
34107 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
34108 && DECL_SECTION_NAME (DECL) != NULL)
34110 static section *
34111 rs6000_xcoff_select_section (tree decl, int reloc,
34112 unsigned HOST_WIDE_INT align)
34114 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
34115 named section. */
34116 if (align > BIGGEST_ALIGNMENT)
34118 resolve_unique_section (decl, reloc, true);
34119 if (IN_NAMED_SECTION (decl))
34120 return get_named_section (decl, NULL, reloc);
34123 if (decl_readonly_section (decl, reloc))
34125 if (TREE_PUBLIC (decl))
34126 return read_only_data_section;
34127 else
34128 return read_only_private_data_section;
34130 else
34132 #if HAVE_AS_TLS
34133 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34135 if (TREE_PUBLIC (decl))
34136 return tls_data_section;
34137 else if (bss_initializer_p (decl))
34139 /* Convert to COMMON to emit in BSS. */
34140 DECL_COMMON (decl) = 1;
34141 return tls_comm_section;
34143 else
34144 return tls_private_data_section;
34146 else
34147 #endif
34148 if (TREE_PUBLIC (decl))
34149 return data_section;
34150 else
34151 return private_data_section;
34155 static void
34156 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
34158 const char *name;
34160 /* Use select_section for private data and uninitialized data with
34161 alignment <= BIGGEST_ALIGNMENT. */
34162 if (!TREE_PUBLIC (decl)
34163 || DECL_COMMON (decl)
34164 || (DECL_INITIAL (decl) == NULL_TREE
34165 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
34166 || DECL_INITIAL (decl) == error_mark_node
34167 || (flag_zero_initialized_in_bss
34168 && initializer_zerop (DECL_INITIAL (decl))))
34169 return;
34171 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
34172 name = (*targetm.strip_name_encoding) (name);
34173 set_decl_section_name (decl, name);
34176 /* Select section for constant in constant pool.
34178 On RS/6000, all constants are in the private read-only data area.
34179 However, if this is being placed in the TOC it must be output as a
34180 toc entry. */
34182 static section *
34183 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34184 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34186 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34187 return toc_section;
34188 else
34189 return read_only_private_data_section;
34192 /* Remove any trailing [DS] or the like from the symbol name. */
34194 static const char *
34195 rs6000_xcoff_strip_name_encoding (const char *name)
34197 size_t len;
34198 if (*name == '*')
34199 name++;
34200 len = strlen (name);
34201 if (name[len - 1] == ']')
34202 return ggc_alloc_string (name, len - 4);
34203 else
34204 return name;
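/* Illustrative sketch (editorial, not part of the original source):
   "foo[DS]" strips to "foo" and "*bar[RW]" to "bar"; a name with no
   trailing "]" is returned unchanged.  */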
34207 /* Section attributes. AIX is always PIC. */
34209 static unsigned int
34210 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34212 unsigned int align;
34213 unsigned int flags = default_section_type_flags (decl, name, reloc);
34215 /* Align to at least UNIT size. */
34216 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34217 align = MIN_UNITS_PER_WORD;
34218 else
34219 /* Increase alignment of large objects if not already stricter. */
34220 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34221 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34222 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34224 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34227 /* Output at beginning of assembler file.
34229 Initialize the section names for the RS/6000 at this point.
34231 Specify filename, including full path, to assembler.
34233 We want to go into the TOC section so at least one .toc will be emitted.
34234 Also, in order to output proper .bs/.es pairs, we need at least one static
34235 [RW] section emitted.
34237 Finally, declare mcount when profiling to make the assembler happy. */
34239 static void
34240 rs6000_xcoff_file_start (void)
34242 rs6000_gen_section_name (&xcoff_bss_section_name,
34243 main_input_filename, ".bss_");
34244 rs6000_gen_section_name (&xcoff_private_data_section_name,
34245 main_input_filename, ".rw_");
34246 rs6000_gen_section_name (&xcoff_read_only_section_name,
34247 main_input_filename, ".ro_");
34248 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34249 main_input_filename, ".tls_");
34250 rs6000_gen_section_name (&xcoff_tbss_section_name,
34251 main_input_filename, ".tbss_[UL]");
34253 fputs ("\t.file\t", asm_out_file);
34254 output_quoted_string (asm_out_file, main_input_filename);
34255 fputc ('\n', asm_out_file);
34256 if (write_symbols != NO_DEBUG)
34257 switch_to_section (private_data_section);
34258 switch_to_section (toc_section);
34259 switch_to_section (text_section);
34260 if (profile_flag)
34261 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34262 rs6000_file_start ();
34265 /* Output at end of assembler file.
34266 On the RS/6000, referencing data should automatically pull in text. */
34268 static void
34269 rs6000_xcoff_file_end (void)
34271 switch_to_section (text_section);
34272 fputs ("_section_.text:\n", asm_out_file);
34273 switch_to_section (data_section);
34274 fputs (TARGET_32BIT
34275 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34276 asm_out_file);
34279 struct declare_alias_data
34281 FILE *file;
34282 bool function_descriptor;
34285 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
34287 static bool
34288 rs6000_declare_alias (struct symtab_node *n, void *d)
34290 struct declare_alias_data *data = (struct declare_alias_data *)d;
34291 /* Main symbol is output specially, because varasm machinery does part of
34292 the job for us - we do not need to declare .globl/lglobs and such. */
34293 if (!n->alias || n->weakref)
34294 return false;
34296 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34297 return false;
34299 /* Prevent assemble_alias from trying to use .set pseudo operation
34300 that does not behave as expected by the middle-end. */
34301 TREE_ASM_WRITTEN (n->decl) = true;
34303 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34304 char *buffer = (char *) alloca (strlen (name) + 2);
34305 char *p;
34306 int dollar_inside = 0;
34308 strcpy (buffer, name);
34309 p = strchr (buffer, '$');
34310 while (p) {
34311 *p = '_';
34312 dollar_inside++;
34313 p = strchr (p + 1, '$');
34315 if (TREE_PUBLIC (n->decl))
34317 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34319 if (dollar_inside) {
34320 if (data->function_descriptor)
34321 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34322 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34324 if (data->function_descriptor)
34326 fputs ("\t.globl .", data->file);
34327 RS6000_OUTPUT_BASENAME (data->file, buffer);
34328 putc ('\n', data->file);
34330 fputs ("\t.globl ", data->file);
34331 RS6000_OUTPUT_BASENAME (data->file, buffer);
34332 putc ('\n', data->file);
34334 #ifdef ASM_WEAKEN_DECL
34335 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34336 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34337 #endif
34339 else
34341 if (dollar_inside)
34343 if (data->function_descriptor)
34344 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34345 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34347 if (data->function_descriptor)
34349 fputs ("\t.lglobl .", data->file);
34350 RS6000_OUTPUT_BASENAME (data->file, buffer);
34351 putc ('\n', data->file);
34353 fputs ("\t.lglobl ", data->file);
34354 RS6000_OUTPUT_BASENAME (data->file, buffer);
34355 putc ('\n', data->file);
34357 if (data->function_descriptor)
34358 fputs (".", data->file);
34359 RS6000_OUTPUT_BASENAME (data->file, buffer);
34360 fputs (":\n", data->file);
34361 return false;
34365 #ifdef HAVE_GAS_HIDDEN
34366 /* Helper function to calculate visibility of a DECL
34367 and return the value as a const string. */
34369 static const char *
34370 rs6000_xcoff_visibility (tree decl)
34372 static const char * const visibility_types[] = {
34373 "", ",protected", ",hidden", ",internal"
34376 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34378 if (TREE_CODE (decl) == FUNCTION_DECL
34379 && cgraph_node::get (decl)
34380 && cgraph_node::get (decl)->instrumentation_clone
34381 && cgraph_node::get (decl)->instrumented_version)
34382 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
34384 return visibility_types[vis];
34386 #endif
34389 /* This macro produces the initial definition of a function name.
34390 On the RS/6000, we need to place an extra '.' in the function name and
34391 output the function descriptor.
34392 Dollar signs are converted to underscores.
34394 The csect for the function will have already been created when
34395 text_section was selected. We do have to go back to that csect, however.
34397 The third and fourth parameters to the .function pseudo-op (16 and 044)
34398 are placeholders which no longer have any use.
34400 Because AIX assembler's .set command has unexpected semantics, we output
34401 all aliases as alternative labels in front of the definition. */
34403 void
34404 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34406 char *buffer = (char *) alloca (strlen (name) + 1);
34407 char *p;
34408 int dollar_inside = 0;
34409 struct declare_alias_data data = {file, false};
34411 strcpy (buffer, name);
34412 p = strchr (buffer, '$');
34413 while (p) {
34414 *p = '_';
34415 dollar_inside++;
34416 p = strchr (p + 1, '$');
34418 if (TREE_PUBLIC (decl))
34420 if (!RS6000_WEAK || !DECL_WEAK (decl))
34422 if (dollar_inside) {
34423 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34424 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34426 fputs ("\t.globl .", file);
34427 RS6000_OUTPUT_BASENAME (file, buffer);
34428 #ifdef HAVE_GAS_HIDDEN
34429 fputs (rs6000_xcoff_visibility (decl), file);
34430 #endif
34431 putc ('\n', file);
34434 else
34436 if (dollar_inside) {
34437 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34438 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34440 fputs ("\t.lglobl .", file);
34441 RS6000_OUTPUT_BASENAME (file, buffer);
34442 putc ('\n', file);
34444 fputs ("\t.csect ", file);
34445 RS6000_OUTPUT_BASENAME (file, buffer);
34446 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34447 RS6000_OUTPUT_BASENAME (file, buffer);
34448 fputs (":\n", file);
34449 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34450 &data, true);
34451 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34452 RS6000_OUTPUT_BASENAME (file, buffer);
34453 fputs (", TOC[tc0], 0\n", file);
34454 in_section = NULL;
34455 switch_to_section (function_section (decl));
34456 putc ('.', file);
34457 RS6000_OUTPUT_BASENAME (file, buffer);
34458 fputs (":\n", file);
34459 data.function_descriptor = true;
34460 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34461 &data, true);
34462 if (!DECL_IGNORED_P (decl))
34464 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34465 xcoffout_declare_function (file, decl, buffer);
34466 else if (write_symbols == DWARF2_DEBUG)
34468 name = (*targetm.strip_name_encoding) (name);
34469 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34472 return;
34476 /* Output assembly language to globalize a symbol from a DECL,
34477 possibly with visibility. */
34479 void
34480 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34482 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34483 fputs (GLOBAL_ASM_OP, stream);
34484 RS6000_OUTPUT_BASENAME (stream, name);
34485 #ifdef HAVE_GAS_HIDDEN
34486 fputs (rs6000_xcoff_visibility (decl), stream);
34487 #endif
34488 putc ('\n', stream);
34491 /* Output assembly language to define a symbol as COMMON from a DECL,
34492 possibly with visibility. */
34494 void
34495 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34496 tree decl ATTRIBUTE_UNUSED,
34497 const char *name,
34498 unsigned HOST_WIDE_INT size,
34499 unsigned HOST_WIDE_INT align)
34501 unsigned HOST_WIDE_INT align2 = 2;
34503 if (align > 32)
34504 align2 = floor_log2 (align / BITS_PER_UNIT);
34505 else if (size > 4)
34506 align2 = 3;
34508 fputs (COMMON_ASM_OP, stream);
34509 RS6000_OUTPUT_BASENAME (stream, name);
34511 fprintf (stream,
34512 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34513 size, align2);
34515 #ifdef HAVE_GAS_HIDDEN
34516 if (decl != NULL)
34517 fputs (rs6000_xcoff_visibility (decl), stream);
34518 #endif
34519 putc ('\n', stream);
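/* Illustrative sketch (editorial, not part of the original source): ALIGN
   is in bits, so a 16-byte object with 64-bit alignment (align == 64 > 32)
   gets align2 == floor_log2 (64 / 8) == 3, i.e. ".comm name,16,3"; a
   4-byte object with default alignment keeps align2 == 2.  */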
34522 /* This macro produces the initial definition of an object (variable) name.
34523 Because AIX assembler's .set command has unexpected semantics, we output
34524 all aliases as alternative labels in front of the definition. */
34526 void
34527 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34529 struct declare_alias_data data = {file, false};
34530 RS6000_OUTPUT_BASENAME (file, name);
34531 fputs (":\n", file);
34532 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34533 &data, true);
34536 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34538 void
34539 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34541 fputs (integer_asm_op (size, FALSE), file);
34542 assemble_name (file, label);
34543 fputs ("-$", file);
34546 /* Output a symbol offset relative to the dbase for the current object.
34547 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34548 signed offsets.
34550 __gcc_unwind_dbase is embedded in all executables/libraries through
34551 libgcc/config/rs6000/crtdbase.S. */
34553 void
34554 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34556 fputs (integer_asm_op (size, FALSE), file);
34557 assemble_name (file, label);
34558 fputs("-__gcc_unwind_dbase", file);
34561 #ifdef HAVE_AS_TLS
34562 static void
34563 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34565 rtx symbol;
34566 int flags;
34567 const char *symname;
34569 default_encode_section_info (decl, rtl, first);
34571 /* Careful not to prod global register variables. */
34572 if (!MEM_P (rtl))
34573 return;
34574 symbol = XEXP (rtl, 0);
34575 if (GET_CODE (symbol) != SYMBOL_REF)
34576 return;
34578 flags = SYMBOL_REF_FLAGS (symbol);
34580 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34581 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34583 SYMBOL_REF_FLAGS (symbol) = flags;
34585 /* Append mapping class to extern decls. */
34586 symname = XSTR (symbol, 0);
34587 if (decl /* sync condition with assemble_external () */
34588 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34589 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34590 || TREE_CODE (decl) == FUNCTION_DECL)
34591 && symname[strlen (symname) - 1] != ']')
34593 char *newname = (char *) alloca (strlen (symname) + 5);
34594 strcpy (newname, symname);
34595 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34596 ? "[DS]" : "[UA]"));
34597 XSTR (symbol, 0) = ggc_strdup (newname);
34600 #endif /* HAVE_AS_TLS */
34601 #endif /* TARGET_XCOFF */
34603 void
34604 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34605 const char *name, const char *val)
34607 fputs ("\t.weak\t", stream);
34608 RS6000_OUTPUT_BASENAME (stream, name);
34609 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34610 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34612 if (TARGET_XCOFF)
34613 fputs ("[DS]", stream);
34614 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34615 if (TARGET_XCOFF)
34616 fputs (rs6000_xcoff_visibility (decl), stream);
34617 #endif
34618 fputs ("\n\t.weak\t.", stream);
34619 RS6000_OUTPUT_BASENAME (stream, name);
34621 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34622 if (TARGET_XCOFF)
34623 fputs (rs6000_xcoff_visibility (decl), stream);
34624 #endif
34625 fputc ('\n', stream);
34626 if (val)
34628 #ifdef ASM_OUTPUT_DEF
34629 ASM_OUTPUT_DEF (stream, name, val);
34630 #endif
34631 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34632 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34634 fputs ("\t.set\t.", stream);
34635 RS6000_OUTPUT_BASENAME (stream, name);
34636 fputs (",.", stream);
34637 RS6000_OUTPUT_BASENAME (stream, val);
34638 fputc ('\n', stream);
34644 /* Return true if INSN should not be copied. */
34646 static bool
34647 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34649 return recog_memoized (insn) >= 0
34650 && get_attr_cannot_copy (insn);
34653 /* Compute a (partial) cost for rtx X. Return true if the complete
34654 cost has been computed, and false if subexpressions should be
34655 scanned. In either case, *TOTAL contains the cost result. */
34657 static bool
34658 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34659 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34661 int code = GET_CODE (x);
34663 switch (code)
34665 /* On the RS/6000, if it is valid in the insn, it is free. */
34666 case CONST_INT:
34667 if (((outer_code == SET
34668 || outer_code == PLUS
34669 || outer_code == MINUS)
34670 && (satisfies_constraint_I (x)
34671 || satisfies_constraint_L (x)))
34672 || (outer_code == AND
34673 && (satisfies_constraint_K (x)
34674 || (mode == SImode
34675 ? satisfies_constraint_L (x)
34676 : satisfies_constraint_J (x))))
34677 || ((outer_code == IOR || outer_code == XOR)
34678 && (satisfies_constraint_K (x)
34679 || (mode == SImode
34680 ? satisfies_constraint_L (x)
34681 : satisfies_constraint_J (x))))
34682 || outer_code == ASHIFT
34683 || outer_code == ASHIFTRT
34684 || outer_code == LSHIFTRT
34685 || outer_code == ROTATE
34686 || outer_code == ROTATERT
34687 || outer_code == ZERO_EXTRACT
34688 || (outer_code == MULT
34689 && satisfies_constraint_I (x))
34690 || ((outer_code == DIV || outer_code == UDIV
34691 || outer_code == MOD || outer_code == UMOD)
34692 && exact_log2 (INTVAL (x)) >= 0)
34693 || (outer_code == COMPARE
34694 && (satisfies_constraint_I (x)
34695 || satisfies_constraint_K (x)))
34696 || ((outer_code == EQ || outer_code == NE)
34697 && (satisfies_constraint_I (x)
34698 || satisfies_constraint_K (x)
34699 || (mode == SImode
34700 ? satisfies_constraint_L (x)
34701 : satisfies_constraint_J (x))))
34702 || (outer_code == GTU
34703 && satisfies_constraint_I (x))
34704 || (outer_code == LTU
34705 && satisfies_constraint_P (x)))
34707 *total = 0;
34708 return true;
34710 else if ((outer_code == PLUS
34711 && reg_or_add_cint_operand (x, VOIDmode))
34712 || (outer_code == MINUS
34713 && reg_or_sub_cint_operand (x, VOIDmode))
34714 || ((outer_code == SET
34715 || outer_code == IOR
34716 || outer_code == XOR)
34717 && (INTVAL (x)
34718 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34720 *total = COSTS_N_INSNS (1);
34721 return true;
34723 /* FALLTHRU */
34725 case CONST_DOUBLE:
34726 case CONST_WIDE_INT:
34727 case CONST:
34728 case HIGH:
34729 case SYMBOL_REF:
34730 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34731 return true;
34733 case MEM:
34734 /* When optimizing for size, MEM should be slightly more expensive
34735 than generating an address, e.g., (plus (reg) (const)).
34736 L1 cache latency is about two instructions. */
34737 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34738 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34739 *total += COSTS_N_INSNS (100);
34740 return true;
34742 case LABEL_REF:
34743 *total = 0;
34744 return true;
34746 case PLUS:
34747 case MINUS:
34748 if (FLOAT_MODE_P (mode))
34749 *total = rs6000_cost->fp;
34750 else
34751 *total = COSTS_N_INSNS (1);
34752 return false;
34754 case MULT:
34755 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34756 && satisfies_constraint_I (XEXP (x, 1)))
34758 if (INTVAL (XEXP (x, 1)) >= -256
34759 && INTVAL (XEXP (x, 1)) <= 255)
34760 *total = rs6000_cost->mulsi_const9;
34761 else
34762 *total = rs6000_cost->mulsi_const;
34764 else if (mode == SFmode)
34765 *total = rs6000_cost->fp;
34766 else if (FLOAT_MODE_P (mode))
34767 *total = rs6000_cost->dmul;
34768 else if (mode == DImode)
34769 *total = rs6000_cost->muldi;
34770 else
34771 *total = rs6000_cost->mulsi;
34772 return false;
34774 case FMA:
34775 if (mode == SFmode)
34776 *total = rs6000_cost->fp;
34777 else
34778 *total = rs6000_cost->dmul;
34779 break;
34781 case DIV:
34782 case MOD:
34783 if (FLOAT_MODE_P (mode))
34785 *total = mode == DFmode ? rs6000_cost->ddiv
34786 : rs6000_cost->sdiv;
34787 return false;
34789 /* FALLTHRU */
34791 case UDIV:
34792 case UMOD:
34793 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34794 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34796 if (code == DIV || code == MOD)
34797 /* Shift, addze */
34798 *total = COSTS_N_INSNS (2);
34799 else
34800 /* Shift */
34801 *total = COSTS_N_INSNS (1);
34803 else
34805 if (GET_MODE (XEXP (x, 1)) == DImode)
34806 *total = rs6000_cost->divdi;
34807 else
34808 *total = rs6000_cost->divsi;
34810 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34811 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34812 *total += COSTS_N_INSNS (2);
34813 return false;
34815 case CTZ:
34816 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34817 return false;
34819 case FFS:
34820 *total = COSTS_N_INSNS (4);
34821 return false;
34823 case POPCOUNT:
34824 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34825 return false;
34827 case PARITY:
34828 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34829 return false;
34831 case NOT:
34832 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34833 *total = 0;
34834 else
34835 *total = COSTS_N_INSNS (1);
34836 return false;
34838 case AND:
34839 if (CONST_INT_P (XEXP (x, 1)))
34841 rtx left = XEXP (x, 0);
34842 rtx_code left_code = GET_CODE (left);
34844 /* rotate-and-mask: 1 insn. */
34845 if ((left_code == ROTATE
34846 || left_code == ASHIFT
34847 || left_code == LSHIFTRT)
34848 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34850 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34851 if (!CONST_INT_P (XEXP (left, 1)))
34852 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34853 *total += COSTS_N_INSNS (1);
34854 return true;
34857 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34858 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34859 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34860 || (val & 0xffff) == val
34861 || (val & 0xffff0000) == val
34862 || ((val & 0xffff) == 0 && mode == SImode))
34864 *total = rtx_cost (left, mode, AND, 0, speed);
34865 *total += COSTS_N_INSNS (1);
34866 return true;
34869 /* 2 insns. */
34870 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34872 *total = rtx_cost (left, mode, AND, 0, speed);
34873 *total += COSTS_N_INSNS (2);
34874 return true;
34878 *total = COSTS_N_INSNS (1);
34879 return false;
34881 case IOR:
34882 /* FIXME */
34883 *total = COSTS_N_INSNS (1);
34884 return true;
34886 case CLZ:
34887 case XOR:
34888 case ZERO_EXTRACT:
34889 *total = COSTS_N_INSNS (1);
34890 return false;
34892 case ASHIFT:
34893 /* The EXTSWSLI instruction is a combined instruction. Don't count
34894 the sign extend and the shift separately within the insn. */
34895 if (TARGET_EXTSWSLI && mode == DImode
34896 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34897 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34899 *total = 0;
34900 return false;
34902 /* fall through */
34904 case ASHIFTRT:
34905 case LSHIFTRT:
34906 case ROTATE:
34907 case ROTATERT:
34908 /* Handle mul_highpart. */
34909 if (outer_code == TRUNCATE
34910 && GET_CODE (XEXP (x, 0)) == MULT)
34912 if (mode == DImode)
34913 *total = rs6000_cost->muldi;
34914 else
34915 *total = rs6000_cost->mulsi;
34916 return true;
34918 else if (outer_code == AND)
34919 *total = 0;
34920 else
34921 *total = COSTS_N_INSNS (1);
34922 return false;
34924 case SIGN_EXTEND:
34925 case ZERO_EXTEND:
34926 if (GET_CODE (XEXP (x, 0)) == MEM)
34927 *total = 0;
34928 else
34929 *total = COSTS_N_INSNS (1);
34930 return false;
34932 case COMPARE:
34933 case NEG:
34934 case ABS:
34935 if (!FLOAT_MODE_P (mode))
34937 *total = COSTS_N_INSNS (1);
34938 return false;
34940 /* FALLTHRU */
34942 case FLOAT:
34943 case UNSIGNED_FLOAT:
34944 case FIX:
34945 case UNSIGNED_FIX:
34946 case FLOAT_TRUNCATE:
34947 *total = rs6000_cost->fp;
34948 return false;
34950 case FLOAT_EXTEND:
34951 if (mode == DFmode)
34952 *total = rs6000_cost->sfdf_convert;
34953 else
34954 *total = rs6000_cost->fp;
34955 return false;
34957 case UNSPEC:
34958 switch (XINT (x, 1))
34960 case UNSPEC_FRSP:
34961 *total = rs6000_cost->fp;
34962 return true;
34964 default:
34965 break;
34967 break;
34969 case CALL:
34970 case IF_THEN_ELSE:
34971 if (!speed)
34973 *total = COSTS_N_INSNS (1);
34974 return true;
34976 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34978 *total = rs6000_cost->fp;
34979 return false;
34981 break;
34983 case NE:
34984 case EQ:
34985 case GTU:
34986 case LTU:
34987 /* Carry bit requires mode == Pmode.
34988 NEG or PLUS already counted so only add one. */
34989 if (mode == Pmode
34990 && (outer_code == NEG || outer_code == PLUS))
34992 *total = COSTS_N_INSNS (1);
34993 return true;
34995 /* FALLTHRU */
34997 case GT:
34998 case LT:
34999 case UNORDERED:
35000 if (outer_code == SET)
35002 if (XEXP (x, 1) == const0_rtx)
35004 *total = COSTS_N_INSNS (2);
35005 return true;
35007 else
35009 *total = COSTS_N_INSNS (3);
35010 return false;
35013 /* CC COMPARE. */
35014 if (outer_code == COMPARE)
35016 *total = 0;
35017 return true;
35019 break;
35021 default:
35022 break;
35025 return false;
35028 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
35030 static bool
35031 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
35032 int opno, int *total, bool speed)
35034 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
35036 fprintf (stderr,
35037 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
35038 "opno = %d, total = %d, speed = %s, x:\n",
35039 ret ? "complete" : "scan inner",
35040 GET_MODE_NAME (mode),
35041 GET_RTX_NAME (outer_code),
35042 opno,
35043 *total,
35044 speed ? "true" : "false");
35046 debug_rtx (x);
35048 return ret;
35051 static int
35052 rs6000_insn_cost (rtx_insn *insn, bool speed)
35054 if (recog_memoized (insn) < 0)
35055 return 0;
35057 if (!speed)
35058 return get_attr_length (insn);
35060 int cost = get_attr_cost (insn);
35061 if (cost > 0)
35062 return cost;
35064 int n = get_attr_length (insn) / 4;
35065 enum attr_type type = get_attr_type (insn);
35067 switch (type)
35069 case TYPE_LOAD:
35070 case TYPE_FPLOAD:
35071 case TYPE_VECLOAD:
35072 cost = COSTS_N_INSNS (n + 1);
35073 break;
35075 case TYPE_MUL:
35076 switch (get_attr_size (insn))
35078 case SIZE_8:
35079 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
35080 break;
35081 case SIZE_16:
35082 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
35083 break;
35084 case SIZE_32:
35085 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
35086 break;
35087 case SIZE_64:
35088 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
35089 break;
35090 default:
35091 gcc_unreachable ();
35093 break;
35094 case TYPE_DIV:
35095 switch (get_attr_size (insn))
35097 case SIZE_32:
35098 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
35099 break;
35100 case SIZE_64:
35101 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
35102 break;
35103 default:
35104 gcc_unreachable ();
35106 break;
35108 case TYPE_FP:
35109 cost = n * rs6000_cost->fp;
35110 break;
35111 case TYPE_DMUL:
35112 cost = n * rs6000_cost->dmul;
35113 break;
35114 case TYPE_SDIV:
35115 cost = n * rs6000_cost->sdiv;
35116 break;
35117 case TYPE_DDIV:
35118 cost = n * rs6000_cost->ddiv;
35119 break;
35121 case TYPE_SYNC:
35122 case TYPE_LOAD_L:
35123 case TYPE_MFCR:
35124 case TYPE_MFCRF:
35125 cost = COSTS_N_INSNS (n + 2);
35126 break;
35128 default:
35129 cost = COSTS_N_INSNS (n);
35132 return cost;
35135 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
35137 static int
35138 rs6000_debug_address_cost (rtx x, machine_mode mode,
35139 addr_space_t as, bool speed)
35141 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
35143 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
35144 ret, speed ? "true" : "false");
35145 debug_rtx (x);
35147 return ret;
35151 /* A C expression returning the cost of moving data from a register of class
35152 FROM to one of class TO. */
35154 static int
35155 rs6000_register_move_cost (machine_mode mode,
35156 reg_class_t from, reg_class_t to)
35158 int ret;
35160 if (TARGET_DEBUG_COST)
35161 dbg_cost_ctrl++;
35163 /* Moves from/to GENERAL_REGS. */
35164 if (reg_classes_intersect_p (to, GENERAL_REGS)
35165 || reg_classes_intersect_p (from, GENERAL_REGS))
35167 reg_class_t rclass = from;
35169 if (! reg_classes_intersect_p (to, GENERAL_REGS))
35170 rclass = to;
35172 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
35173 ret = (rs6000_memory_move_cost (mode, rclass, false)
35174 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
35176 /* It's more expensive to move CR_REGS than CR0_REGS because of the
35177 shift. */
35178 else if (rclass == CR_REGS)
35179 ret = 4;
35181 /* For those processors that have slow LR/CTR moves, make them more
35182 expensive than memory in order to bias spills to memory. */
35183 else if ((rs6000_tune == PROCESSOR_POWER6
35184 || rs6000_tune == PROCESSOR_POWER7
35185 || rs6000_tune == PROCESSOR_POWER8
35186 || rs6000_tune == PROCESSOR_POWER9)
35187 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35188 ret = 6 * hard_regno_nregs (0, mode);
35190 else
35191 /* A move will cost one instruction per GPR moved. */
35192 ret = 2 * hard_regno_nregs (0, mode);
35195 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35196 else if (VECTOR_MEM_VSX_P (mode)
35197 && reg_classes_intersect_p (to, VSX_REGS)
35198 && reg_classes_intersect_p (from, VSX_REGS))
35199 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35201 /* Moving between two similar registers is just one instruction. */
35202 else if (reg_classes_intersect_p (to, from))
35203 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35205 /* Everything else has to go through GENERAL_REGS. */
35206 else
35207 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35208 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35210 if (TARGET_DEBUG_COST)
35212 if (dbg_cost_ctrl == 1)
35213 fprintf (stderr,
35214 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35215 ret, GET_MODE_NAME (mode), reg_class_names[from],
35216 reg_class_names[to]);
35217 dbg_cost_ctrl--;
35220 return ret;
35223 /* A C expression returning the cost of moving data of MODE to or from
35224 memory. */
35226 static int
35227 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35228 bool in ATTRIBUTE_UNUSED)
35230 int ret;
35232 if (TARGET_DEBUG_COST)
35233 dbg_cost_ctrl++;
35235 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35236 ret = 4 * hard_regno_nregs (0, mode);
35237 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35238 || reg_classes_intersect_p (rclass, VSX_REGS)))
35239 ret = 4 * hard_regno_nregs (32, mode);
35240 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35241 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35242 else
35243 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35245 if (TARGET_DEBUG_COST)
35247 if (dbg_cost_ctrl == 1)
35248 fprintf (stderr,
35249 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35250 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35251 dbg_cost_ctrl--;
35254 return ret;
35257 /* Returns a code for a target-specific builtin that implements
35258 reciprocal of the function, or NULL_TREE if not available. */
35260 static tree
35261 rs6000_builtin_reciprocal (tree fndecl)
35263 switch (DECL_FUNCTION_CODE (fndecl))
35265 case VSX_BUILTIN_XVSQRTDP:
35266 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35267 return NULL_TREE;
35269 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35271 case VSX_BUILTIN_XVSQRTSP:
35272 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35273 return NULL_TREE;
35275 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35277 default:
35278 return NULL_TREE;
35282 /* Load up a constant. If the mode is a vector mode, splat the value across
35283 all of the vector elements. */
35285 static rtx
35286 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35288 rtx reg;
35290 if (mode == SFmode || mode == DFmode)
35292 rtx d = const_double_from_real_value (dconst, mode);
35293 reg = force_reg (mode, d);
35295 else if (mode == V4SFmode)
35297 rtx d = const_double_from_real_value (dconst, SFmode);
35298 rtvec v = gen_rtvec (4, d, d, d, d);
35299 reg = gen_reg_rtx (mode);
35300 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35302 else if (mode == V2DFmode)
35304 rtx d = const_double_from_real_value (dconst, DFmode);
35305 rtvec v = gen_rtvec (2, d, d);
35306 reg = gen_reg_rtx (mode);
35307 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35309 else
35310 gcc_unreachable ();
35312 return reg;
35315 /* Generate an FMA instruction. */
35317 static void
35318 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35320 machine_mode mode = GET_MODE (target);
35321 rtx dst;
35323 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35324 gcc_assert (dst != NULL);
35326 if (dst != target)
35327 emit_move_insn (target, dst);
35330 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35332 static void
35333 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35335 machine_mode mode = GET_MODE (dst);
35336 rtx r;
35338 /* This is a tad more complicated, since the fnma_optab is for
35339 a different expression: fma(-m1, m2, a), which is the same
35340 thing except in the case of signed zeros.
35342 Fortunately we know that if FMA is supported that FNMSUB is
35343 also supported in the ISA. Just expand it directly. */
35345 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35347 r = gen_rtx_NEG (mode, a);
35348 r = gen_rtx_FMA (mode, m1, m2, r);
35349 r = gen_rtx_NEG (mode, r);
35350 emit_insn (gen_rtx_SET (dst, r));
35353 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35354 add a reg_note saying that this was a division. Support both scalar and
35355 vector divide. Assumes no trapping math and finite arguments. */
35357 void
35358 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35360 machine_mode mode = GET_MODE (dst);
35361 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35362 int i;
35364 /* Low precision estimates guarantee 5 bits of accuracy. High
35365 precision estimates guarantee 14 bits of accuracy. SFmode
35366 requires 23 bits of accuracy. DFmode requires 52 bits of
35367 accuracy. Each pass at least doubles the accuracy, leading
35368 to the following. */
35369 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35370 if (mode == DFmode || mode == V2DFmode)
35371 passes++;
35373 enum insn_code code = optab_handler (smul_optab, mode);
35374 insn_gen_fn gen_mul = GEN_FCN (code);
35376 gcc_assert (code != CODE_FOR_nothing);
35378 one = rs6000_load_constant_and_splat (mode, dconst1);
35380 /* x0 = 1./d estimate */
35381 x0 = gen_reg_rtx (mode);
35382 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35383 UNSPEC_FRES)));
35385 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35386 if (passes > 1) {
35388 /* e0 = 1. - d * x0 */
35389 e0 = gen_reg_rtx (mode);
35390 rs6000_emit_nmsub (e0, d, x0, one);
35392 /* x1 = x0 + e0 * x0 */
35393 x1 = gen_reg_rtx (mode);
35394 rs6000_emit_madd (x1, e0, x0, x0);
35396 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35397 ++i, xprev = xnext, eprev = enext) {
35399 /* enext = eprev * eprev */
35400 enext = gen_reg_rtx (mode);
35401 emit_insn (gen_mul (enext, eprev, eprev));
35403 /* xnext = xprev + enext * xprev */
35404 xnext = gen_reg_rtx (mode);
35405 rs6000_emit_madd (xnext, enext, xprev, xprev);
35408 } else
35409 xprev = x0;
35411 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35413 /* u = n * xprev */
35414 u = gen_reg_rtx (mode);
35415 emit_insn (gen_mul (u, n, xprev));
35417 /* v = n - (d * u) */
35418 v = gen_reg_rtx (mode);
35419 rs6000_emit_nmsub (v, d, u, n);
35421 /* dst = (v * xprev) + u */
35422 rs6000_emit_madd (dst, v, xprev, u);
35424 if (note_p)
35425 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
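/* Illustrative sketch (not part of GCC): the refinement sequence emitted
   above, written in plain C for the DFmode/-mrecip-precision case, where
   passes == 2.  EST stands in for the fre hardware estimate of 1/d and the
   function name is hypothetical.  With a 14-bit estimate, the doubling pass
   reaches 28 bits (enough for SFmode's 23) and the final pass reaches 56
   bits (enough for DFmode's 52).  */
static double
swdiv_sketch (double n, double d, double est)
{
  /* One x_(i+1) = x_i + x_i * (1 - d * x_i) pass; each pass at least
     doubles the number of accurate bits.  */
  double e = 1.0 - d * est;          /* rs6000_emit_nmsub */
  double x = est + e * est;          /* rs6000_emit_madd */
  /* The last pass folds in the numerator: dst = u + x * (n - d * u).  */
  double u = n * x;
  double v = n - d * u;              /* rs6000_emit_nmsub */
  return u + v * x;                  /* rs6000_emit_madd */
}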
35428 /* Goldschmidt's Algorithm for single/double-precision floating point
35429 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35431 void
35432 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35434 machine_mode mode = GET_MODE (src);
35435 rtx e = gen_reg_rtx (mode);
35436 rtx g = gen_reg_rtx (mode);
35437 rtx h = gen_reg_rtx (mode);
35439 /* Low precision estimates guarantee 5 bits of accuracy. High
35440 precision estimates guarantee 14 bits of accuracy. SFmode
35441 requires 23 bits of accuracy. DFmode requires 52 bits of
35442 accuracy. Each pass at least doubles the accuracy, leading
35443 to the following. */
35444 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35445 if (mode == DFmode || mode == V2DFmode)
35446 passes++;
35448 int i;
35449 rtx mhalf;
35450 enum insn_code code = optab_handler (smul_optab, mode);
35451 insn_gen_fn gen_mul = GEN_FCN (code);
35453 gcc_assert (code != CODE_FOR_nothing);
35455 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35457 /* e = rsqrt estimate */
35458 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35459 UNSPEC_RSQRT)));
35461 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35462 if (!recip)
35464 rtx zero = force_reg (mode, CONST0_RTX (mode));
35466 if (mode == SFmode)
35468 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35469 e, zero, mode, 0);
35470 if (target != e)
35471 emit_move_insn (e, target);
35473 else
35475 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35476 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35480 /* g = sqrt estimate. */
35481 emit_insn (gen_mul (g, e, src));
35482 /* h = 1/(2*sqrt) estimate. */
35483 emit_insn (gen_mul (h, e, mhalf));
35485 if (recip)
35487 if (passes == 1)
35489 rtx t = gen_reg_rtx (mode);
35490 rs6000_emit_nmsub (t, g, h, mhalf);
35491 /* Apply correction directly to 1/rsqrt estimate. */
35492 rs6000_emit_madd (dst, e, t, e);
35494 else
35496 for (i = 0; i < passes; i++)
35498 rtx t1 = gen_reg_rtx (mode);
35499 rtx g1 = gen_reg_rtx (mode);
35500 rtx h1 = gen_reg_rtx (mode);
35502 rs6000_emit_nmsub (t1, g, h, mhalf);
35503 rs6000_emit_madd (g1, g, t1, g);
35504 rs6000_emit_madd (h1, h, t1, h);
35506 g = g1;
35507 h = h1;
35509 /* Multiply by 2 for 1/rsqrt. */
35510 emit_insn (gen_add3_insn (dst, h, h));
35513 else
35515 rtx t = gen_reg_rtx (mode);
35516 rs6000_emit_nmsub (t, g, h, mhalf);
35517 rs6000_emit_madd (dst, g, t, g);
35520 return;
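/* Illustrative sketch (not part of GCC): one formulation of the Goldschmidt
   iteration emitted above, in plain C.  EST stands in for the hardware
   rsqrt estimate of 1/sqrt(src); the function name is hypothetical.  */
static double
swsqrt_sketch (double src, double est, int passes)
{
  double g = src * est;              /* converges to sqrt(src) */
  double h = 0.5 * est;              /* converges to 1/(2*sqrt(src)) */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;        /* the rs6000_emit_nmsub step */
      g = g + g * t;                 /* madd: g1 = g*t1 + g */
      h = h + h * t;                 /* madd: h1 = h*t1 + h */
    }
  return g;                          /* for rsqrt, return h + h instead */
}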
35523 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35524 (Power7) targets. DST is the target, and SRC is the argument operand. */
35526 void
35527 rs6000_emit_popcount (rtx dst, rtx src)
35529 machine_mode mode = GET_MODE (dst);
35530 rtx tmp1, tmp2;
35532 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35533 if (TARGET_POPCNTD)
35535 if (mode == SImode)
35536 emit_insn (gen_popcntdsi2 (dst, src));
35537 else
35538 emit_insn (gen_popcntddi2 (dst, src));
35539 return;
35542 tmp1 = gen_reg_rtx (mode);
35544 if (mode == SImode)
35546 emit_insn (gen_popcntbsi2 (tmp1, src));
35547 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35548 NULL_RTX, 0);
35549 tmp2 = force_reg (SImode, tmp2);
35550 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35552 else
35554 emit_insn (gen_popcntbdi2 (tmp1, src));
35555 tmp2 = expand_mult (DImode, tmp1,
35556 GEN_INT ((HOST_WIDE_INT)
35557 0x01010101 << 32 | 0x01010101),
35558 NULL_RTX, 0);
35559 tmp2 = force_reg (DImode, tmp2);
35560 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
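/* Illustrative sketch (not part of GCC): the multiply trick used above when
   popcntd is unavailable, in plain C for a 32-bit value.  The first three
   lines compute per-byte population counts, standing in for the popcntb
   result.  */
static unsigned int
popcount_sketch (unsigned int x)
{
  x = x - ((x >> 1) & 0x55555555u);                 /* 2-bit sums */
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u); /* 4-bit sums */
  x = (x + (x >> 4)) & 0x0f0f0f0fu;                 /* per-byte counts */
  /* Multiplying by 0x01010101 accumulates all four byte counts into the
     most significant byte; shifting right by 24 extracts the total.  */
  return (x * 0x01010101u) >> 24;
}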
35565 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35566 target, and SRC is the argument operand. */
35568 void
35569 rs6000_emit_parity (rtx dst, rtx src)
35571 machine_mode mode = GET_MODE (dst);
35572 rtx tmp;
35574 tmp = gen_reg_rtx (mode);
35576 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35577 if (TARGET_CMPB)
35579 if (mode == SImode)
35581 emit_insn (gen_popcntbsi2 (tmp, src));
35582 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35584 else
35586 emit_insn (gen_popcntbdi2 (tmp, src));
35587 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35589 return;
35592 if (mode == SImode)
35594 /* Is mult+shift >= shift+xor+shift+xor? */
35595 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35597 rtx tmp1, tmp2, tmp3, tmp4;
35599 tmp1 = gen_reg_rtx (SImode);
35600 emit_insn (gen_popcntbsi2 (tmp1, src));
35602 tmp2 = gen_reg_rtx (SImode);
35603 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35604 tmp3 = gen_reg_rtx (SImode);
35605 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35607 tmp4 = gen_reg_rtx (SImode);
35608 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35609 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35611 else
35612 rs6000_emit_popcount (tmp, src);
35613 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35615 else
35617 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35618 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35620 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35622 tmp1 = gen_reg_rtx (DImode);
35623 emit_insn (gen_popcntbdi2 (tmp1, src));
35625 tmp2 = gen_reg_rtx (DImode);
35626 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35627 tmp3 = gen_reg_rtx (DImode);
35628 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35630 tmp4 = gen_reg_rtx (DImode);
35631 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35632 tmp5 = gen_reg_rtx (DImode);
35633 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35635 tmp6 = gen_reg_rtx (DImode);
35636 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35637 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35639 else
35640 rs6000_emit_popcount (tmp, src);
35641 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
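/* Illustrative sketch (not part of GCC): the shift/xor fold used above when
   the multiply would cost more than the xor sequence, in plain C.
   BYTE_COUNTS stands in for the popcntb result; the parity is the low bit
   of the xor of all the byte counts.  */
static unsigned int
parity_sketch (unsigned int byte_counts)
{
  unsigned int t = byte_counts ^ (byte_counts >> 16); /* fold halves */
  t ^= t >> 8;                                        /* fold bytes */
  return t & 1;
}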
35645 /* Expand an Altivec constant permutation for little endian mode.
35646 OP0 and OP1 are the input vectors and TARGET is the output vector.
35647 SEL specifies the constant permutation vector.
35649 There are two issues: First, the two input operands must be
35650 swapped so that together they form a double-wide array in LE
35651 order. Second, the vperm instruction has surprising behavior
35652 in LE mode: it interprets the elements of the source vectors
35653 in BE mode ("left to right") and interprets the elements of
35654 the destination vector in LE mode ("right to left"). To
35655 correct for this, we must subtract each element of the permute
35656 control vector from 31.
35658 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35659 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35660 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35661 serve as the permute control vector. Then, in BE mode,
35663 vperm 9,10,11,12
35665 places the desired result in vr9. However, in LE mode the
35666 vector contents will be
35668 vr10 = 00000003 00000002 00000001 00000000
35669 vr11 = 00000007 00000006 00000005 00000004
35671 The result of the vperm using the same permute control vector is
35673 vr9 = 05000000 07000000 01000000 03000000
35675 That is, the leftmost 4 bytes of vr10 are interpreted as the
35676 source for the rightmost 4 bytes of vr9, and so on.
35678 If we change the permute control vector to
35680 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35682 and issue
35684 vperm 9,11,10,12
35686 we get the desired
35688 vr9 = 00000006 00000004 00000002 00000000. */
35690 static void
35691 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35692 const vec_perm_indices &sel)
35694 unsigned int i;
35695 rtx perm[16];
35696 rtx constv, unspec;
35698 /* Unpack and adjust the constant selector. */
35699 for (i = 0; i < 16; ++i)
35701 unsigned int elt = 31 - (sel[i] & 31);
35702 perm[i] = GEN_INT (elt);
35705 /* Expand to a permute, swapping the inputs and using the
35706 adjusted selector. */
35707 if (!REG_P (op0))
35708 op0 = force_reg (V16QImode, op0);
35709 if (!REG_P (op1))
35710 op1 = force_reg (V16QImode, op1);
35712 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35713 constv = force_reg (V16QImode, constv);
35714 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35715 UNSPEC_VPERM);
35716 if (!REG_P (target))
35718 rtx tmp = gen_reg_rtx (V16QImode);
35719 emit_move_insn (tmp, unspec);
35720 unspec = tmp;
35723 emit_move_insn (target, unspec);
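/* Illustrative sketch (not part of GCC): the selector adjustment performed
   above, in plain C.  Given a big-endian permute control vector BE_SEL,
   subtracting each element from 31 (together with swapping the two input
   operands at the vperm) yields the little-endian control vector; names
   are hypothetical.  */
static void
le_adjust_sketch (const unsigned char be_sel[16], unsigned char le_sel[16])
{
  for (int i = 0; i < 16; i++)
    le_sel[i] = 31 - (be_sel[i] & 31);
}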
35726 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35727 permute control vector. But here it's not a constant, so we must
35728 generate a vector NAND or NOR to do the adjustment. */
35730 void
35731 altivec_expand_vec_perm_le (rtx operands[4])
35733 rtx notx, iorx, unspec;
35734 rtx target = operands[0];
35735 rtx op0 = operands[1];
35736 rtx op1 = operands[2];
35737 rtx sel = operands[3];
35738 rtx tmp = target;
35739 rtx norreg = gen_reg_rtx (V16QImode);
35740 machine_mode mode = GET_MODE (target);
35742 /* Get everything in regs so the pattern matches. */
35743 if (!REG_P (op0))
35744 op0 = force_reg (mode, op0);
35745 if (!REG_P (op1))
35746 op1 = force_reg (mode, op1);
35747 if (!REG_P (sel))
35748 sel = force_reg (V16QImode, sel);
35749 if (!REG_P (target))
35750 tmp = gen_reg_rtx (mode);
35752 if (TARGET_P9_VECTOR)
35754 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35755 UNSPEC_VPERMR);
35757 else
35759 /* Invert the selector with a VNAND if available, else a VNOR.
35760 The VNAND is preferred for future fusion opportunities. */
35761 notx = gen_rtx_NOT (V16QImode, sel);
35762 iorx = (TARGET_P8_VECTOR
35763 ? gen_rtx_IOR (V16QImode, notx, notx)
35764 : gen_rtx_AND (V16QImode, notx, notx));
35765 emit_insn (gen_rtx_SET (norreg, iorx));
35767 /* Permute with operands reversed and adjusted selector. */
35768 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35769 UNSPEC_VPERM);
35772 /* Copy into target, possibly by way of a register. */
35773 if (!REG_P (target))
35775 emit_move_insn (tmp, unspec);
35776 unspec = tmp;
35779 emit_move_insn (target, unspec);
35782 /* Expand an Altivec constant permutation. Return true if we match
35783 an efficient implementation; false to fall back to VPERM.
35785 OP0 and OP1 are the input vectors and TARGET is the output vector.
35786 SEL specifies the constant permutation vector. */
35788 static bool
35789 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35790 const vec_perm_indices &sel)
35792 struct altivec_perm_insn {
35793 HOST_WIDE_INT mask;
35794 enum insn_code impl;
35795 unsigned char perm[16];
35797 static const struct altivec_perm_insn patterns[] = {
35798 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35799 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35800 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35801 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35802 { OPTION_MASK_ALTIVEC,
35803 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35804 : CODE_FOR_altivec_vmrglb_direct),
35805 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35806 { OPTION_MASK_ALTIVEC,
35807 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35808 : CODE_FOR_altivec_vmrglh_direct),
35809 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35810 { OPTION_MASK_ALTIVEC,
35811 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35812 : CODE_FOR_altivec_vmrglw_direct),
35813 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35814 { OPTION_MASK_ALTIVEC,
35815 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35816 : CODE_FOR_altivec_vmrghb_direct),
35817 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35818 { OPTION_MASK_ALTIVEC,
35819 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35820 : CODE_FOR_altivec_vmrghh_direct),
35821 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35822 { OPTION_MASK_ALTIVEC,
35823 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35824 : CODE_FOR_altivec_vmrghw_direct),
35825 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35826 { OPTION_MASK_P8_VECTOR,
35827 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35828 : CODE_FOR_p8_vmrgow_v4sf_direct),
35829 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35830 { OPTION_MASK_P8_VECTOR,
35831 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35832 : CODE_FOR_p8_vmrgew_v4sf_direct),
35833 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35836 unsigned int i, j, elt, which;
35837 unsigned char perm[16];
35838 rtx x;
35839 bool one_vec;
35841 /* Unpack the constant selector. */
35842 for (i = which = 0; i < 16; ++i)
35844 elt = sel[i] & 31;
35845 which |= (elt < 16 ? 1 : 2);
35846 perm[i] = elt;
35849 /* Simplify the constant selector based on operands. */
35850 switch (which)
35852 default:
35853 gcc_unreachable ();
35855 case 3:
35856 one_vec = false;
35857 if (!rtx_equal_p (op0, op1))
35858 break;
35859 /* FALLTHRU */
35861 case 2:
35862 for (i = 0; i < 16; ++i)
35863 perm[i] &= 15;
35864 op0 = op1;
35865 one_vec = true;
35866 break;
35868 case 1:
35869 op1 = op0;
35870 one_vec = true;
35871 break;
35874 /* Look for splat patterns. */
35875 if (one_vec)
35877 elt = perm[0];
35879 for (i = 0; i < 16; ++i)
35880 if (perm[i] != elt)
35881 break;
35882 if (i == 16)
35884 if (!BYTES_BIG_ENDIAN)
35885 elt = 15 - elt;
35886 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35887 return true;
35890 if (elt % 2 == 0)
35892 for (i = 0; i < 16; i += 2)
35893 if (perm[i] != elt || perm[i + 1] != elt + 1)
35894 break;
35895 if (i == 16)
35897 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35898 x = gen_reg_rtx (V8HImode);
35899 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35900 GEN_INT (field)));
35901 emit_move_insn (target, gen_lowpart (V16QImode, x));
35902 return true;
35906 if (elt % 4 == 0)
35908 for (i = 0; i < 16; i += 4)
35909 if (perm[i] != elt
35910 || perm[i + 1] != elt + 1
35911 || perm[i + 2] != elt + 2
35912 || perm[i + 3] != elt + 3)
35913 break;
35914 if (i == 16)
35916 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35917 x = gen_reg_rtx (V4SImode);
35918 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35919 GEN_INT (field)));
35920 emit_move_insn (target, gen_lowpart (V16QImode, x));
35921 return true;
35926 /* Look for merge and pack patterns. */
35927 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35929 bool swapped;
35931 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35932 continue;
35934 elt = patterns[j].perm[0];
35935 if (perm[0] == elt)
35936 swapped = false;
35937 else if (perm[0] == elt + 16)
35938 swapped = true;
35939 else
35940 continue;
35941 for (i = 1; i < 16; ++i)
35943 elt = patterns[j].perm[i];
35944 if (swapped)
35945 elt = (elt >= 16 ? elt - 16 : elt + 16);
35946 else if (one_vec && elt >= 16)
35947 elt -= 16;
35948 if (perm[i] != elt)
35949 break;
35951 if (i == 16)
35953 enum insn_code icode = patterns[j].impl;
35954 machine_mode omode = insn_data[icode].operand[0].mode;
35955 machine_mode imode = insn_data[icode].operand[1].mode;
35957 /* For little-endian, don't use vpkuwum and vpkuhum if the
35958 underlying vector type is not V4SI or V8HI, respectively.
35959 For example, using vpkuwum with a V8HI picks up the even
35960 halfwords (BE numbering) when the even halfwords (LE
35961 numbering) are what we need. */
35962 if (!BYTES_BIG_ENDIAN
35963 && icode == CODE_FOR_altivec_vpkuwum_direct
35964 && ((GET_CODE (op0) == REG
35965 && GET_MODE (op0) != V4SImode)
35966 || (GET_CODE (op0) == SUBREG
35967 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35968 continue;
35969 if (!BYTES_BIG_ENDIAN
35970 && icode == CODE_FOR_altivec_vpkuhum_direct
35971 && ((GET_CODE (op0) == REG
35972 && GET_MODE (op0) != V8HImode)
35973 || (GET_CODE (op0) == SUBREG
35974 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35975 continue;
35977 /* For little-endian, the two input operands must be swapped
35978 (or swapped back) to ensure proper right-to-left numbering
35979 from 0 to 2N-1. */
35980 if (swapped ^ !BYTES_BIG_ENDIAN)
35981 std::swap (op0, op1);
35982 if (imode != V16QImode)
35984 op0 = gen_lowpart (imode, op0);
35985 op1 = gen_lowpart (imode, op1);
35987 if (omode == V16QImode)
35988 x = target;
35989 else
35990 x = gen_reg_rtx (omode);
35991 emit_insn (GEN_FCN (icode) (x, op0, op1));
35992 if (omode != V16QImode)
35993 emit_move_insn (target, gen_lowpart (V16QImode, x));
35994 return true;
35998 if (!BYTES_BIG_ENDIAN)
36000 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
36001 return true;
36004 return false;
36007 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
36008 Return true if we match an efficient implementation. */
36010 static bool
36011 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
36012 unsigned char perm0, unsigned char perm1)
36014 rtx x;
36016 /* If both selectors come from the same operand, fold to single op. */
36017 if ((perm0 & 2) == (perm1 & 2))
36019 if (perm0 & 2)
36020 op0 = op1;
36021 else
36022 op1 = op0;
36024 /* If both operands are equal, fold to simpler permutation. */
36025 if (rtx_equal_p (op0, op1))
36027 perm0 = perm0 & 1;
36028 perm1 = (perm1 & 1) + 2;
36030 /* If the first selector comes from the second operand, swap. */
36031 else if (perm0 & 2)
36033 if (perm1 & 2)
36034 return false;
36035 perm0 -= 2;
36036 perm1 += 2;
36037 std::swap (op0, op1);
36039 /* If the second selector does not come from the second operand, fail. */
36040 else if ((perm1 & 2) == 0)
36041 return false;
36043 /* Success! */
36044 if (target != NULL)
36046 machine_mode vmode, dmode;
36047 rtvec v;
36049 vmode = GET_MODE (target);
36050 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
36051 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
36052 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
36053 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
36054 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
36055 emit_insn (gen_rtx_SET (target, x));
36057 return true;
36060 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
36062 static bool
36063 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
36064 rtx op1, const vec_perm_indices &sel)
36066 bool testing_p = !target;
36068 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
36069 if (TARGET_ALTIVEC && testing_p)
36070 return true;
36072 /* Check for ps_merge* or xxpermdi insns. */
36073 if ((vmode == V2SFmode && TARGET_PAIRED_FLOAT)
36074 || ((vmode == V2DFmode || vmode == V2DImode)
36075 && VECTOR_MEM_VSX_P (vmode)))
36077 if (testing_p)
36079 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
36080 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
36082 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
36083 return true;
36086 if (TARGET_ALTIVEC)
36088 /* Force the target-independent code to lower to V16QImode. */
36089 if (vmode != V16QImode)
36090 return false;
36091 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
36092 return true;
36095 return false;
36098 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
36099 OP0 and OP1 are the input vectors and TARGET is the output vector.
36100 PERM specifies the constant permutation vector. */
36102 static void
36103 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
36104 machine_mode vmode, const vec_perm_builder &perm)
36106 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
36107 if (x != target)
36108 emit_move_insn (target, x);
36111 /* Expand an extract even operation. */
36113 void
36114 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
36116 machine_mode vmode = GET_MODE (target);
36117 unsigned i, nelt = GET_MODE_NUNITS (vmode);
36118 vec_perm_builder perm (nelt, nelt, 1);
36120 for (i = 0; i < nelt; i++)
36121 perm.quick_push (i * 2);
36123 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
36126 /* Expand a vector interleave operation. */
36128 void
36129 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
36131 machine_mode vmode = GET_MODE (target);
36132 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
36133 vec_perm_builder perm (nelt, nelt, 1);
36135 high = (highp ? 0 : nelt / 2);
36136 for (i = 0; i < nelt / 2; i++)
36138 perm.quick_push (i + high);
36139 perm.quick_push (i + nelt + high);
36142 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
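/* Worked example (illustrative): for a V4SI target, nelt == 4, so
   rs6000_expand_extract_even builds the selector {0, 2, 4, 6}, while
   rs6000_expand_interleave builds {0, 4, 1, 5} when HIGHP and
   {2, 6, 3, 7} otherwise, with elements 4..7 naming the lanes of OP1.  */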
36145 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
36146 void
36147 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
36149 HOST_WIDE_INT hwi_scale (scale);
36150 REAL_VALUE_TYPE r_pow;
36151 rtvec v = rtvec_alloc (2);
36152 rtx elt;
36153 rtx scale_vec = gen_reg_rtx (V2DFmode);
36154 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
36155 elt = const_double_from_real_value (r_pow, DFmode);
36156 RTVEC_ELT (v, 0) = elt;
36157 RTVEC_ELT (v, 1) = elt;
36158 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
36159 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
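/* Worked example (illustrative): rs6000_scale_v2df (tgt, src, 4) computes
   2**4 = 16.0, splats it into the vector {16.0, 16.0}, and emits a V2DF
   multiply, so each lane of SRC is scaled by 16.  */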
36162 /* Return an RTX representing where to find the function value of a
36163 function returning MODE. */
36164 static rtx
36165 rs6000_complex_function_value (machine_mode mode)
36167 unsigned int regno;
36168 rtx r1, r2;
36169 machine_mode inner = GET_MODE_INNER (mode);
36170 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
36172 if (TARGET_FLOAT128_TYPE
36173 && (mode == KCmode
36174 || (mode == TCmode && TARGET_IEEEQUAD)))
36175 regno = ALTIVEC_ARG_RETURN;
36177 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36178 regno = FP_ARG_RETURN;
36180 else
36182 regno = GP_ARG_RETURN;
36184 /* 32-bit is OK since it'll go in r3/r4. */
36185 if (TARGET_32BIT && inner_bytes >= 4)
36186 return gen_rtx_REG (mode, regno);
36189 if (inner_bytes >= 8)
36190 return gen_rtx_REG (mode, regno);
36192 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36193 const0_rtx);
36194 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36195 GEN_INT (inner_bytes));
36196 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36199 /* Return an rtx describing a return value of MODE as a PARALLEL
36200 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36201 stride REG_STRIDE. */
36203 static rtx
36204 rs6000_parallel_return (machine_mode mode,
36205 int n_elts, machine_mode elt_mode,
36206 unsigned int regno, unsigned int reg_stride)
36208 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36210 int i;
36211 for (i = 0; i < n_elts; i++)
36213 rtx r = gen_rtx_REG (elt_mode, regno);
36214 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36215 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36216 regno += reg_stride;
36219 return par;
36222 /* Target hook for TARGET_FUNCTION_VALUE.
36224 An integer value is in r3 and a floating-point value is in fp1,
36225 unless -msoft-float. */
36227 static rtx
36228 rs6000_function_value (const_tree valtype,
36229 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36230 bool outgoing ATTRIBUTE_UNUSED)
36232 machine_mode mode;
36233 unsigned int regno;
36234 machine_mode elt_mode;
36235 int n_elts;
36237 /* Special handling for structs in darwin64. */
36238 if (TARGET_MACHO
36239 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36241 CUMULATIVE_ARGS valcum;
36242 rtx valret;
36244 valcum.words = 0;
36245 valcum.fregno = FP_ARG_MIN_REG;
36246 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36247 /* Do a trial code generation as if this were going to be passed as
36248 an argument; if any part goes in memory, we return NULL. */
36249 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36250 if (valret)
36251 return valret;
36252 /* Otherwise fall through to standard ABI rules. */
36255 mode = TYPE_MODE (valtype);
36257 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36258 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36260 int first_reg, n_regs;
36262 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36264 /* _Decimal128 must use even/odd register pairs. */
36265 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36266 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36268 else
36270 first_reg = ALTIVEC_ARG_RETURN;
36271 n_regs = 1;
36274 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36277 /* Some return value types need to be split in the 32-bit ABI with -mpowerpc64. */
36278 if (TARGET_32BIT && TARGET_POWERPC64)
36279 switch (mode)
36281 default:
36282 break;
36283 case E_DImode:
36284 case E_SCmode:
36285 case E_DCmode:
36286 case E_TCmode:
36287 int count = GET_MODE_SIZE (mode) / 4;
36288 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36291 if ((INTEGRAL_TYPE_P (valtype)
36292 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36293 || POINTER_TYPE_P (valtype))
36294 mode = TARGET_32BIT ? SImode : DImode;
36296 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36297 /* _Decimal128 must use an even/odd register pair. */
36298 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36299 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36300 && !FLOAT128_VECTOR_P (mode)
36301 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
36302 regno = FP_ARG_RETURN;
36303 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36304 && targetm.calls.split_complex_arg)
36305 return rs6000_complex_function_value (mode);
36306 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36307 return register is used in both cases, and we won't see V2DImode/V2DFmode
36308 for pure altivec, combine the two cases. */
36309 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36310 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36311 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36312 regno = ALTIVEC_ARG_RETURN;
36313 else
36314 regno = GP_ARG_RETURN;
36316 return gen_rtx_REG (mode, regno);
36319 /* Define how to find the value returned by a library function
36320 assuming the value has mode MODE. */
36321 static rtx
36322 rs6000_libcall_value (machine_mode mode)
36324 unsigned int regno;
36326 /* A long long return value needs to be split in the 32-bit ABI with -mpowerpc64. */
36327 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36328 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36330 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36331 /* _Decimal128 must use an even/odd register pair. */
36332 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36333 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
36334 && TARGET_HARD_FLOAT
36335 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
36336 regno = FP_ARG_RETURN;
36337 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36338 return register is used in both cases, and we won't see V2DImode/V2DFmode
36339 for pure altivec, combine the two cases. */
36340 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36341 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36342 regno = ALTIVEC_ARG_RETURN;
36343 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36344 return rs6000_complex_function_value (mode);
36345 else
36346 regno = GP_ARG_RETURN;
36348 return gen_rtx_REG (mode, regno);
36351 /* Compute register pressure classes. We implement the target hook to avoid
36352 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36353 lead to incorrect estimates of the number of available registers and therefore
36354 increased register pressure/spill. */
36355 static int
36356 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36358 int n;
36360 n = 0;
36361 pressure_classes[n++] = GENERAL_REGS;
36362 if (TARGET_VSX)
36363 pressure_classes[n++] = VSX_REGS;
36364 else
36366 if (TARGET_ALTIVEC)
36367 pressure_classes[n++] = ALTIVEC_REGS;
36368 if (TARGET_HARD_FLOAT)
36369 pressure_classes[n++] = FLOAT_REGS;
36371 pressure_classes[n++] = CR_REGS;
36372 pressure_classes[n++] = SPECIAL_REGS;
36374 return n;
36377 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36378 Frame pointer elimination is automatically handled.
36380 For the RS/6000, if frame pointer elimination is being done, we would like
36381 to convert ap into fp, not sp.
36383 We need r30 if -mminimal-toc was specified, and there are constant pool
36384 references. */
36386 static bool
36387 rs6000_can_eliminate (const int from, const int to)
36389 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36390 ? ! frame_pointer_needed
36391 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36392 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36393 || constant_pool_empty_p ()
36394 : true);
36397 /* Define the offset between two registers, FROM to be eliminated and its
36398 replacement TO, at the start of a routine. */
36399 HOST_WIDE_INT
36400 rs6000_initial_elimination_offset (int from, int to)
36402 rs6000_stack_t *info = rs6000_stack_info ();
36403 HOST_WIDE_INT offset;
36405 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36406 offset = info->push_p ? 0 : -info->total_size;
36407 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36409 offset = info->push_p ? 0 : -info->total_size;
36410 if (FRAME_GROWS_DOWNWARD)
36411 offset += info->fixed_size + info->vars_size + info->parm_size;
36413 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36414 offset = FRAME_GROWS_DOWNWARD
36415 ? info->fixed_size + info->vars_size + info->parm_size
36416 : 0;
36417 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36418 offset = info->total_size;
36419 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36420 offset = info->push_p ? info->total_size : 0;
36421 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36422 offset = 0;
36423 else
36424 gcc_unreachable ();
36426 return offset;
36429 /* Fill in sizes of registers used by unwinder. */
36431 static void
36432 rs6000_init_dwarf_reg_sizes_extra (tree address)
36434 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36436 int i;
36437 machine_mode mode = TYPE_MODE (char_type_node);
36438 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36439 rtx mem = gen_rtx_MEM (BLKmode, addr);
36440 rtx value = gen_int_mode (16, mode);
36442 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36443 The unwinder still needs to know the size of Altivec registers. */
36445 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36447 int column = DWARF_REG_TO_UNWIND_COLUMN
36448 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36449 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36451 emit_move_insn (adjust_address (mem, mode, offset), value);
36456 /* Map internal gcc register numbers to debug format register numbers.
36457 FORMAT specifies the type of debug register number to use:
36458 0 -- debug information, except for frame-related sections
36459 1 -- DWARF .debug_frame section
36460 2 -- DWARF .eh_frame section */
36462 unsigned int
36463 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36465 /* Except for the above, we use the internal number for non-DWARF
36466 debug information, and also for .eh_frame. */
36467 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36468 return regno;
36470 /* On some platforms, we use the standard DWARF register
36471 numbering for .debug_info and .debug_frame. */
36472 #ifdef RS6000_USE_DWARF_NUMBERING
36473 if (regno <= 63)
36474 return regno;
36475 if (regno == LR_REGNO)
36476 return 108;
36477 if (regno == CTR_REGNO)
36478 return 109;
36479 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36480 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36481 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36482 to the DWARF reg for CR. */
36483 if (format == 1 && regno == CR2_REGNO)
36484 return 64;
36485 if (CR_REGNO_P (regno))
36486 return regno - CR0_REGNO + 86;
36487 if (regno == CA_REGNO)
36488 return 101; /* XER */
36489 if (ALTIVEC_REGNO_P (regno))
36490 return regno - FIRST_ALTIVEC_REGNO + 1124;
36491 if (regno == VRSAVE_REGNO)
36492 return 356;
36493 if (regno == VSCR_REGNO)
36494 return 67;
36495 #endif
36496 return regno;
36499 /* target hook eh_return_filter_mode */
36500 static scalar_int_mode
36501 rs6000_eh_return_filter_mode (void)
36503 return TARGET_32BIT ? SImode : word_mode;
36506 /* Target hook for scalar_mode_supported_p. */
36507 static bool
36508 rs6000_scalar_mode_supported_p (scalar_mode mode)
36510 /* -m32 does not support TImode. This is the default, from
36511 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36512 same ABI as for -m32. But default_scalar_mode_supported_p allows
36513 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36514 for -mpowerpc64. */
36515 if (TARGET_32BIT && mode == TImode)
36516 return false;
36518 if (DECIMAL_FLOAT_MODE_P (mode))
36519 return default_decimal_float_supported_p ();
36520 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36521 return true;
36522 else
36523 return default_scalar_mode_supported_p (mode);
36526 /* Target hook for vector_mode_supported_p. */
36527 static bool
36528 rs6000_vector_mode_supported_p (machine_mode mode)
36531 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36532 return true;
36534 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36535 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36536 double-double. */
36537 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36538 return true;
36540 else
36541 return false;
36544 /* Target hook for floatn_mode. */
36545 static opt_scalar_float_mode
36546 rs6000_floatn_mode (int n, bool extended)
36548 if (extended)
36550 switch (n)
36552 case 32:
36553 return DFmode;
36555 case 64:
36556 if (TARGET_FLOAT128_TYPE)
36557 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36558 else
36559 return opt_scalar_float_mode ();
36561 case 128:
36562 return opt_scalar_float_mode ();
36564 default:
36565 /* Those are the only valid _FloatNx types. */
36566 gcc_unreachable ();
36569 else
36571 switch (n)
36573 case 32:
36574 return SFmode;
36576 case 64:
36577 return DFmode;
36579 case 128:
36580 if (TARGET_FLOAT128_TYPE)
36581 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36582 else
36583 return opt_scalar_float_mode ();
36585 default:
36586 return opt_scalar_float_mode ();
36592 /* Target hook for c_mode_for_suffix. */
36593 static machine_mode
36594 rs6000_c_mode_for_suffix (char suffix)
36596 if (TARGET_FLOAT128_TYPE)
36598 if (suffix == 'q' || suffix == 'Q')
36599 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36601 /* At the moment, we are not defining a suffix for IBM extended double.
36602 If/when the default for -mabi=ieeelongdouble is changed, and we want
36603 to support __ibm128 constants in legacy library code, we may need to
36604 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36605 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36606 __float80 constants. */
36609 return VOIDmode;
36612 /* Target hook for invalid_arg_for_unprototyped_fn. */
36613 static const char *
36614 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36616 return (!rs6000_darwin64_abi
36617 && typelist == 0
36618 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36619 && (funcdecl == NULL_TREE
36620 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36621 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36622 ? N_("AltiVec argument passed to unprototyped function")
36623 : NULL;
36626 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36627 setup by using __stack_chk_fail_local hidden function instead of
36628 calling __stack_chk_fail directly. Otherwise it is better to call
36629 __stack_chk_fail directly. */
36631 static tree ATTRIBUTE_UNUSED
36632 rs6000_stack_protect_fail (void)
36634 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36635 ? default_hidden_stack_protect_fail ()
36636 : default_external_stack_protect_fail ();
36639 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36641 #if TARGET_ELF
36642 static unsigned HOST_WIDE_INT
36643 rs6000_asan_shadow_offset (void)
36645 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36647 #endif
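/* For exposition: libasan applies the standard shadow mapping
   shadow = (addr >> 3) + offset, so the values above place the shadow
   region at 1 << 41 for 64-bit and 1 << 29 for 32-bit ELF targets.
   (This restates the usual ASan scheme; it is not spelled out in this
   file.)  */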
36649 /* Mask options that we want to support inside of attribute((target)) and
36650 #pragma GCC target operations. Note, we do not include things like
36651 64/32-bit, endianness, hard/soft floating point, etc. that would have
36652 different calling sequences. */
36654 struct rs6000_opt_mask {
36655 const char *name; /* option name */
36656 HOST_WIDE_INT mask; /* mask to set */
36657 bool invert; /* invert sense of mask */
36658 bool valid_target; /* option is a target option */
36661 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36663 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36664 { "cmpb", OPTION_MASK_CMPB, false, true },
36665 { "crypto", OPTION_MASK_CRYPTO, false, true },
36666 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36667 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36668 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36669 false, true },
36670 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36671 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36672 { "fprnd", OPTION_MASK_FPRND, false, true },
36673 { "hard-dfp", OPTION_MASK_DFP, false, true },
36674 { "htm", OPTION_MASK_HTM, false, true },
36675 { "isel", OPTION_MASK_ISEL, false, true },
36676 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36677 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36678 { "modulo", OPTION_MASK_MODULO, false, true },
36679 { "mulhw", OPTION_MASK_MULHW, false, true },
36680 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36681 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36682 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36683 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36684 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36685 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36686 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36687 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36688 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36689 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36690 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36691 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36692 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36693 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36694 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36695 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36696 { "string", 0, false, true },
36697 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36698 { "update", OPTION_MASK_NO_UPDATE, true , true },
36699 { "vsx", OPTION_MASK_VSX, false, true },
36700 #ifdef OPTION_MASK_64BIT
36701 #if TARGET_AIX_OS
36702 { "aix64", OPTION_MASK_64BIT, false, false },
36703 { "aix32", OPTION_MASK_64BIT, true, false },
36704 #else
36705 { "64", OPTION_MASK_64BIT, false, false },
36706 { "32", OPTION_MASK_64BIT, true, false },
36707 #endif
36708 #endif
36709 #ifdef OPTION_MASK_EABI
36710 { "eabi", OPTION_MASK_EABI, false, false },
36711 #endif
36712 #ifdef OPTION_MASK_LITTLE_ENDIAN
36713 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36714 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36715 #endif
36716 #ifdef OPTION_MASK_RELOCATABLE
36717 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36718 #endif
36719 #ifdef OPTION_MASK_STRICT_ALIGN
36720 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36721 #endif
36722 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36723 { "string", 0, false, false },
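/* Hypothetical user code using the names in the table above; each name
   may be prefixed with "no-" to invert its sense:

     __attribute__((__target__("vsx,no-multiple")))
     void f (void);

     #pragma GCC target ("power9-vector")
*/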
36726 /* Builtin mask mapping for printing the flags. */
36727 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36729 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36730 { "vsx", RS6000_BTM_VSX, false, false },
36731 { "paired", RS6000_BTM_PAIRED, false, false },
36732 { "fre", RS6000_BTM_FRE, false, false },
36733 { "fres", RS6000_BTM_FRES, false, false },
36734 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36735 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36736 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36737 { "cell", RS6000_BTM_CELL, false, false },
36738 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36739 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36740 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36741 { "crypto", RS6000_BTM_CRYPTO, false, false },
36742 { "htm", RS6000_BTM_HTM, false, false },
36743 { "hard-dfp", RS6000_BTM_DFP, false, false },
36744 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36745 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36746 { "float128", RS6000_BTM_FLOAT128, false, false },
36747 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36750 /* Option variables that we want to support inside attribute((target)) and
36751 #pragma GCC target operations. */
36753 struct rs6000_opt_var {
36754 const char *name; /* option name */
36755 size_t global_offset; /* offset of the option in global_options. */
36756 size_t target_offset; /* offset of the option in target options. */
36759 static struct rs6000_opt_var const rs6000_opt_vars[] =
36761 { "friz",
36762 offsetof (struct gcc_options, x_TARGET_FRIZ),
36763 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36764 { "avoid-indexed-addresses",
36765 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36766 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36767 { "paired",
36768 offsetof (struct gcc_options, x_rs6000_paired_float),
36769 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36770 { "longcall",
36771 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36772 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36773 { "optimize-swaps",
36774 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36775 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36776 { "allow-movmisalign",
36777 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36778 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36779 { "sched-groups",
36780 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36781 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36782 { "always-hint",
36783 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36784 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36785 { "align-branch-targets",
36786 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36787 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36788 { "tls-markers",
36789 offsetof (struct gcc_options, x_tls_markers),
36790 offsetof (struct cl_target_option, x_tls_markers), },
36791 { "sched-prolog",
36792 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36793 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36794 { "sched-epilog",
36795 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36796 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36797 { "speculate-indirect-jumps",
36798 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36799 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
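/* As a hypothetical example of the variable options above,
   #pragma GCC target ("no-speculate-indirect-jumps") stores 0 through
   the global_options offset recorded for "speculate-indirect-jumps".  */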
36802 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36803 parsing. Return true if there were no errors. */
36805 static bool
36806 rs6000_inner_target_options (tree args, bool attr_p)
36808 bool ret = true;
36810 if (args == NULL_TREE)
36813 else if (TREE_CODE (args) == STRING_CST)
36815 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36816 char *q;
36818 while ((q = strtok (p, ",")) != NULL)
36820 bool error_p = false;
36821 bool not_valid_p = false;
36822 const char *cpu_opt = NULL;
36824 p = NULL;
36825 if (strncmp (q, "cpu=", 4) == 0)
36827 int cpu_index = rs6000_cpu_name_lookup (q+4);
36828 if (cpu_index >= 0)
36829 rs6000_cpu_index = cpu_index;
36830 else
36832 error_p = true;
36833 cpu_opt = q+4;
36836 else if (strncmp (q, "tune=", 5) == 0)
36838 int tune_index = rs6000_cpu_name_lookup (q+5);
36839 if (tune_index >= 0)
36840 rs6000_tune_index = tune_index;
36841 else
36843 error_p = true;
36844 cpu_opt = q+5;
36847 else
36849 size_t i;
36850 bool invert = false;
36851 char *r = q;
36853 error_p = true;
36854 if (strncmp (r, "no-", 3) == 0)
36856 invert = true;
36857 r += 3;
36860 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36861 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36863 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36865 if (!rs6000_opt_masks[i].valid_target)
36866 not_valid_p = true;
36867 else
36869 error_p = false;
36870 rs6000_isa_flags_explicit |= mask;
36872 /* VSX needs altivec, so -mvsx automagically sets
36873 altivec and disables -mavoid-indexed-addresses. */
36874 if (!invert)
36876 if (mask == OPTION_MASK_VSX)
36878 mask |= OPTION_MASK_ALTIVEC;
36879 TARGET_AVOID_XFORM = 0;
36883 if (rs6000_opt_masks[i].invert)
36884 invert = !invert;
36886 if (invert)
36887 rs6000_isa_flags &= ~mask;
36888 else
36889 rs6000_isa_flags |= mask;
36891 break;
36894 if (error_p && !not_valid_p)
36896 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36897 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36899 size_t j = rs6000_opt_vars[i].global_offset;
36900 *((int *) ((char *)&global_options + j)) = !invert;
36901 error_p = false;
36902 not_valid_p = false;
36903 break;
36908 if (error_p)
36910 const char *eprefix, *esuffix;
36912 ret = false;
36913 if (attr_p)
36915 eprefix = "__attribute__((__target__(";
36916 esuffix = ")))";
36918 else
36920 eprefix = "#pragma GCC target ";
36921 esuffix = "";
36924 if (cpu_opt)
36925 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36926 q, esuffix);
36927 else if (not_valid_p)
36928 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36929 else
36930 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36935 else if (TREE_CODE (args) == TREE_LIST)
36939 tree value = TREE_VALUE (args);
36940 if (value)
36942 bool ret2 = rs6000_inner_target_options (value, attr_p);
36943 if (!ret2)
36944 ret = false;
36946 args = TREE_CHAIN (args);
36948 while (args != NULL_TREE);
36951 else
36953 error ("attribute %<target%> argument not a string");
36954 return false;
36957 return ret;
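/* Worked example of the parsing above, for a hypothetical
   __attribute__((__target__("cpu=power9,no-vsx"))): the string is
   tokenized at commas; "cpu=power9" sets rs6000_cpu_index, while
   "no-vsx" strips the "no-" prefix, matches rs6000_opt_masks with
   INVERT set, records OPTION_MASK_VSX in rs6000_isa_flags_explicit, and
   clears it in rs6000_isa_flags.  */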
36960 /* Print out the target options as a list for -mdebug=target. */
36962 static void
36963 rs6000_debug_target_options (tree args, const char *prefix)
36965 if (args == NULL_TREE)
36966 fprintf (stderr, "%s<NULL>", prefix);
36968 else if (TREE_CODE (args) == STRING_CST)
36970 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36971 char *q;
36973 while ((q = strtok (p, ",")) != NULL)
36975 p = NULL;
36976 fprintf (stderr, "%s\"%s\"", prefix, q);
36977 prefix = ", ";
36981 else if (TREE_CODE (args) == TREE_LIST)
36985 tree value = TREE_VALUE (args);
36986 if (value)
36988 rs6000_debug_target_options (value, prefix);
36989 prefix = ", ";
36991 args = TREE_CHAIN (args);
36993 while (args != NULL_TREE);
36996 else
36997 gcc_unreachable ();
36999 return;
37003 /* Hook to validate attribute((target("..."))). */
37005 static bool
37006 rs6000_valid_attribute_p (tree fndecl,
37007 tree ARG_UNUSED (name),
37008 tree args,
37009 int flags)
37011 struct cl_target_option cur_target;
37012 bool ret;
37013 tree old_optimize;
37014 tree new_target, new_optimize;
37015 tree func_optimize;
37017 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
37019 if (TARGET_DEBUG_TARGET)
37021 tree tname = DECL_NAME (fndecl);
37022 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
37023 if (tname)
37024 fprintf (stderr, "function: %.*s\n",
37025 (int) IDENTIFIER_LENGTH (tname),
37026 IDENTIFIER_POINTER (tname));
37027 else
37028 fprintf (stderr, "function: unknown\n");
37030 fprintf (stderr, "args:");
37031 rs6000_debug_target_options (args, " ");
37032 fprintf (stderr, "\n");
37034 if (flags)
37035 fprintf (stderr, "flags: 0x%x\n", flags);
37037 fprintf (stderr, "--------------------\n");
37040 /* attribute((target("default"))) does nothing, beyond
37041 affecting multi-versioning. */
37042 if (TREE_VALUE (args)
37043 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
37044 && TREE_CHAIN (args) == NULL_TREE
37045 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
37046 return true;
37048 old_optimize = build_optimization_node (&global_options);
37049 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
37051 /* If the function changed the optimization levels as well as setting target
37052 options, start with the optimizations specified. */
37053 if (func_optimize && func_optimize != old_optimize)
37054 cl_optimization_restore (&global_options,
37055 TREE_OPTIMIZATION (func_optimize));
37057 /* The target attributes may also change some optimization flags, so update
37058 the optimization options if necessary. */
37059 cl_target_option_save (&cur_target, &global_options);
37060 rs6000_cpu_index = rs6000_tune_index = -1;
37061 ret = rs6000_inner_target_options (args, true);
37063 /* Set up any additional state. */
37064 if (ret)
37066 ret = rs6000_option_override_internal (false);
37067 new_target = build_target_option_node (&global_options);
37069 else
37070 new_target = NULL;
37072 new_optimize = build_optimization_node (&global_options);
37074 if (!new_target)
37075 ret = false;
37077 else if (fndecl)
37079 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
37081 if (old_optimize != new_optimize)
37082 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
37085 cl_target_option_restore (&global_options, &cur_target);
37087 if (old_optimize != new_optimize)
37088 cl_optimization_restore (&global_options,
37089 TREE_OPTIMIZATION (old_optimize));
37091 return ret;
37095 /* Hook to validate the current #pragma GCC target and set the state, and
37096 update the macros based on what was changed. If ARGS is NULL, then
37097 POP_TARGET is used to reset the options. */
37099 bool
37100 rs6000_pragma_target_parse (tree args, tree pop_target)
37102 tree prev_tree = build_target_option_node (&global_options);
37103 tree cur_tree;
37104 struct cl_target_option *prev_opt, *cur_opt;
37105 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
37106 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
37108 if (TARGET_DEBUG_TARGET)
37110 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
37111 fprintf (stderr, "args:");
37112 rs6000_debug_target_options (args, " ");
37113 fprintf (stderr, "\n");
37115 if (pop_target)
37117 fprintf (stderr, "pop_target:\n");
37118 debug_tree (pop_target);
37120 else
37121 fprintf (stderr, "pop_target: <NULL>\n");
37123 fprintf (stderr, "--------------------\n");
37126 if (! args)
37128 cur_tree = ((pop_target)
37129 ? pop_target
37130 : target_option_default_node);
37131 cl_target_option_restore (&global_options,
37132 TREE_TARGET_OPTION (cur_tree));
37134 else
37136 rs6000_cpu_index = rs6000_tune_index = -1;
37137 if (!rs6000_inner_target_options (args, false)
37138 || !rs6000_option_override_internal (false)
37139 || (cur_tree = build_target_option_node (&global_options))
37140 == NULL_TREE)
37142 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
37143 fprintf (stderr, "invalid pragma\n");
37145 return false;
37149 target_option_current_node = cur_tree;
37150 rs6000_activate_target_options (target_option_current_node);
37152 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
37153 change the macros that are defined. */
37154 if (rs6000_target_modify_macros_ptr)
37156 prev_opt = TREE_TARGET_OPTION (prev_tree);
37157 prev_bumask = prev_opt->x_rs6000_builtin_mask;
37158 prev_flags = prev_opt->x_rs6000_isa_flags;
37160 cur_opt = TREE_TARGET_OPTION (cur_tree);
37161 cur_flags = cur_opt->x_rs6000_isa_flags;
37162 cur_bumask = cur_opt->x_rs6000_builtin_mask;
37164 diff_bumask = (prev_bumask ^ cur_bumask);
37165 diff_flags = (prev_flags ^ cur_flags);
37167 if ((diff_flags != 0) || (diff_bumask != 0))
37169 /* Delete old macros. */
37170 rs6000_target_modify_macros_ptr (false,
37171 prev_flags & diff_flags,
37172 prev_bumask & diff_bumask);
37174 /* Define new macros. */
37175 rs6000_target_modify_macros_ptr (true,
37176 cur_flags & diff_flags,
37177 cur_bumask & diff_bumask);
37181 return true;
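/* Hypothetical use of this hook from C code:

     #pragma GCC push_options
     #pragma GCC target ("vsx")
     ...code compiled with VSX enabled...
     #pragma GCC pop_options

   The pop re-enters this function with ARGS == NULL and the saved node
   as POP_TARGET, taking the restore path above.  */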
37185 /* Remember the last target of rs6000_set_current_function. */
37186 static GTY(()) tree rs6000_previous_fndecl;
37188 /* Restore target's globals from NEW_TREE and invalidate the
37189 rs6000_previous_fndecl cache. */
37191 void
37192 rs6000_activate_target_options (tree new_tree)
37194 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37195 if (TREE_TARGET_GLOBALS (new_tree))
37196 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37197 else if (new_tree == target_option_default_node)
37198 restore_target_globals (&default_target_globals);
37199 else
37200 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37201 rs6000_previous_fndecl = NULL_TREE;
37204 /* Establish appropriate back-end context for processing the function
37205 FNDECL. The argument might be NULL to indicate processing at top
37206 level, outside of any function scope. */
37207 static void
37208 rs6000_set_current_function (tree fndecl)
37210 if (TARGET_DEBUG_TARGET)
37212 fprintf (stderr, "\n==================== rs6000_set_current_function");
37214 if (fndecl)
37215 fprintf (stderr, ", fndecl %s (%p)",
37216 (DECL_NAME (fndecl)
37217 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37218 : "<unknown>"), (void *)fndecl);
37220 if (rs6000_previous_fndecl)
37221 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37223 fprintf (stderr, "\n");
37226 /* Only change the context if the function changes. This hook is called
37227 several times in the course of compiling a function, and we don't want to
37228 slow things down too much or call target_reinit when it isn't safe. */
37229 if (fndecl == rs6000_previous_fndecl)
37230 return;
37232 tree old_tree;
37233 if (rs6000_previous_fndecl == NULL_TREE)
37234 old_tree = target_option_current_node;
37235 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37236 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37237 else
37238 old_tree = target_option_default_node;
37240 tree new_tree;
37241 if (fndecl == NULL_TREE)
37243 if (old_tree != target_option_current_node)
37244 new_tree = target_option_current_node;
37245 else
37246 new_tree = NULL_TREE;
37248 else
37250 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37251 if (new_tree == NULL_TREE)
37252 new_tree = target_option_default_node;
37255 if (TARGET_DEBUG_TARGET)
37257 if (new_tree)
37259 fprintf (stderr, "\nnew fndecl target specific options:\n");
37260 debug_tree (new_tree);
37263 if (old_tree)
37265 fprintf (stderr, "\nold fndecl target specific options:\n");
37266 debug_tree (old_tree);
37269 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37270 fprintf (stderr, "--------------------\n");
37273 if (new_tree && old_tree != new_tree)
37274 rs6000_activate_target_options (new_tree);
37276 if (fndecl)
37277 rs6000_previous_fndecl = fndecl;
37281 /* Save the current options */
37283 static void
37284 rs6000_function_specific_save (struct cl_target_option *ptr,
37285 struct gcc_options *opts)
37287 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37288 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37291 /* Restore the current options */
37293 static void
37294 rs6000_function_specific_restore (struct gcc_options *opts,
37295 struct cl_target_option *ptr)
37298 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37299 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37300 (void) rs6000_option_override_internal (false);
37303 /* Print the current options */
37305 static void
37306 rs6000_function_specific_print (FILE *file, int indent,
37307 struct cl_target_option *ptr)
37309 rs6000_print_isa_options (file, indent, "Isa options set",
37310 ptr->x_rs6000_isa_flags);
37312 rs6000_print_isa_options (file, indent, "Isa options explicit",
37313 ptr->x_rs6000_isa_flags_explicit);
37316 /* Helper function to print the current isa or misc options on a line. */
37318 static void
37319 rs6000_print_options_internal (FILE *file,
37320 int indent,
37321 const char *string,
37322 HOST_WIDE_INT flags,
37323 const char *prefix,
37324 const struct rs6000_opt_mask *opts,
37325 size_t num_elements)
37327 size_t i;
37328 size_t start_column = 0;
37329 size_t cur_column;
37330 size_t max_column = 120;
37331 size_t prefix_len = strlen (prefix);
37332 size_t comma_len = 0;
37333 const char *comma = "";
37335 if (indent)
37336 start_column += fprintf (file, "%*s", indent, "");
37338 if (!flags)
37340 fprintf (file, DEBUG_FMT_S, string, "<none>");
37341 return;
37344 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37346 /* Print the various mask options. */
37347 cur_column = start_column;
37348 for (i = 0; i < num_elements; i++)
37350 bool invert = opts[i].invert;
37351 const char *name = opts[i].name;
37352 const char *no_str = "";
37353 HOST_WIDE_INT mask = opts[i].mask;
37354 size_t len = comma_len + prefix_len + strlen (name);
37356 if (!invert)
37358 if ((flags & mask) == 0)
37360 no_str = "no-";
37361 len += sizeof ("no-") - 1;
37364 flags &= ~mask;
37367 else
37369 if ((flags & mask) != 0)
37371 no_str = "no-";
37372 len += sizeof ("no-") - 1;
37375 flags |= mask;
37378 cur_column += len;
37379 if (cur_column > max_column)
37381 fprintf (file, ", \\\n%*s", (int)start_column, "");
37382 cur_column = start_column + len;
37383 comma = "";
37386 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37387 comma = ", ";
37388 comma_len = sizeof (", ") - 1;
37391 fputs ("\n", file);
37394 /* Helper function to print the current isa options on a line. */
37396 static void
37397 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37398 HOST_WIDE_INT flags)
37400 rs6000_print_options_internal (file, indent, string, flags, "-m",
37401 &rs6000_opt_masks[0],
37402 ARRAY_SIZE (rs6000_opt_masks));
37405 static void
37406 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37407 HOST_WIDE_INT flags)
37409 rs6000_print_options_internal (file, indent, string, flags, "",
37410 &rs6000_builtin_mask_names[0],
37411 ARRAY_SIZE (rs6000_builtin_mask_names));
37414 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37415 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37416 -mupper-regs-df, etc.).
37418 If the user used -mno-power8-vector, we need to turn off all of the implicit
37419 ISA 2.07 and 3.0 options that relate to the vector unit.
37421 If the user used -mno-power9-vector, we need to turn off all of the implicit
37422 ISA 3.0 options that relate to the vector unit.
37424 This function does not handle explicit options such as the user specifying
37425 -mdirect-move. These are handled in rs6000_option_override_internal, and
37426 the appropriate error is given if needed.
37428 We return a mask of all of the implicit options that should not be enabled
37429 by default. */
37431 static HOST_WIDE_INT
37432 rs6000_disable_incompatible_switches (void)
37434 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37435 size_t i, j;
37437 static const struct {
37438 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37439 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37440 const char *const name; /* name of the switch. */
37441 } flags[] = {
37442 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37443 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37444 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37447 for (i = 0; i < ARRAY_SIZE (flags); i++)
37449 HOST_WIDE_INT no_flag = flags[i].no_flag;
37451 if ((rs6000_isa_flags & no_flag) == 0
37452 && (rs6000_isa_flags_explicit & no_flag) != 0)
37454 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37455 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37456 & rs6000_isa_flags
37457 & dep_flags);
37459 if (set_flags)
37461 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37462 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37464 set_flags &= ~rs6000_opt_masks[j].mask;
37465 error ("%<-mno-%s%> turns off %<-m%s%>",
37466 flags[i].name,
37467 rs6000_opt_masks[j].name);
37470 gcc_assert (!set_flags);
37473 rs6000_isa_flags &= ~dep_flags;
37474 ignore_masks |= no_flag | dep_flags;
37478 return ignore_masks;
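/* Worked example (assuming OTHER_VSX_VECTOR_MASKS includes
   OPTION_MASK_P9_VECTOR, as set up elsewhere in the port): compiling
   with -mno-vsx -mpower9-vector makes both flags explicit, so the loop
   above reports "-mno-vsx turns off -mpower9-vector" and clears the
   dependent flags.  */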
37482 /* Helper function for printing the function name when debugging. */
37484 static const char *
37485 get_decl_name (tree fn)
37487 tree name;
37489 if (!fn)
37490 return "<null>";
37492 name = DECL_NAME (fn);
37493 if (!name)
37494 return "<no-name>";
37496 return IDENTIFIER_POINTER (name);
37499 /* Return the clone id of the target we are compiling code for in a target
37500 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37501 the priority list for the target clones (ordered from lowest to
37502 highest). */
37504 static int
37505 rs6000_clone_priority (tree fndecl)
37507 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37508 HOST_WIDE_INT isa_masks;
37509 int ret = CLONE_DEFAULT;
37510 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37511 const char *attrs_str = NULL;
37513 attrs = TREE_VALUE (TREE_VALUE (attrs));
37514 attrs_str = TREE_STRING_POINTER (attrs);
37516 /* Return priority zero for default function. Return the ISA needed for the
37517 function if it is not the default. */
37518 if (strcmp (attrs_str, "default") != 0)
37520 if (fn_opts == NULL_TREE)
37521 fn_opts = target_option_default_node;
37523 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37524 isa_masks = rs6000_isa_flags;
37525 else
37526 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37528 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37529 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37530 break;
37533 if (TARGET_DEBUG_TARGET)
37534 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37535 get_decl_name (fndecl), ret);
37537 return ret;
37540 /* This compares the priority of target features in function DECL1 and DECL2.
37541 It returns positive value if DECL1 is higher priority, negative value if
37542 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37543 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37545 static int
37546 rs6000_compare_version_priority (tree decl1, tree decl2)
37548 int priority1 = rs6000_clone_priority (decl1);
37549 int priority2 = rs6000_clone_priority (decl2);
37550 int ret = priority1 - priority2;
37552 if (TARGET_DEBUG_TARGET)
37553 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37554 get_decl_name (decl1), get_decl_name (decl2), ret);
37556 return ret;
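/* Hypothetical user code exercising the priority machinery above:

     __attribute__((target_clones("cpu=power9,cpu=power8,default")))
     long mod (long a, long b) { return a % b; }

   The power9 clone receives the highest priority, so the dispatcher
   tests for it first and falls back toward the default.  */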
37559 /* Make a dispatcher declaration for the multi-versioned function DECL.
37560 Calls to DECL function will be replaced with calls to the dispatcher
37561 by the front-end. Returns the decl of the dispatcher function. */
37563 static tree
37564 rs6000_get_function_versions_dispatcher (void *decl)
37566 tree fn = (tree) decl;
37567 struct cgraph_node *node = NULL;
37568 struct cgraph_node *default_node = NULL;
37569 struct cgraph_function_version_info *node_v = NULL;
37570 struct cgraph_function_version_info *first_v = NULL;
37572 tree dispatch_decl = NULL;
37574 struct cgraph_function_version_info *default_version_info = NULL;
37575 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37577 if (TARGET_DEBUG_TARGET)
37578 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37579 get_decl_name (fn));
37581 node = cgraph_node::get (fn);
37582 gcc_assert (node != NULL);
37584 node_v = node->function_version ();
37585 gcc_assert (node_v != NULL);
37587 if (node_v->dispatcher_resolver != NULL)
37588 return node_v->dispatcher_resolver;
37590 /* Find the default version and make it the first node. */
37591 first_v = node_v;
37592 /* Go to the beginning of the chain. */
37593 while (first_v->prev != NULL)
37594 first_v = first_v->prev;
37596 default_version_info = first_v;
37597 while (default_version_info != NULL)
37599 const tree decl2 = default_version_info->this_node->decl;
37600 if (is_function_default_version (decl2))
37601 break;
37602 default_version_info = default_version_info->next;
37605 /* If there is no default node, just return NULL. */
37606 if (default_version_info == NULL)
37607 return NULL;
37609 /* Make default info the first node. */
37610 if (first_v != default_version_info)
37612 default_version_info->prev->next = default_version_info->next;
37613 if (default_version_info->next)
37614 default_version_info->next->prev = default_version_info->prev;
37615 first_v->prev = default_version_info;
37616 default_version_info->next = first_v;
37617 default_version_info->prev = NULL;
37620 default_node = default_version_info->this_node;
37622 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37623 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37624 "target_clones attribute needs GLIBC (2.23 and newer) that "
37625 "exports hardware capability bits");
37626 #else
37628 if (targetm.has_ifunc_p ())
37630 struct cgraph_function_version_info *it_v = NULL;
37631 struct cgraph_node *dispatcher_node = NULL;
37632 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37634 /* Right now, the dispatching is done via ifunc. */
37635 dispatch_decl = make_dispatcher_decl (default_node->decl);
37637 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37638 gcc_assert (dispatcher_node != NULL);
37639 dispatcher_node->dispatcher_function = 1;
37640 dispatcher_version_info
37641 = dispatcher_node->insert_new_function_version ();
37642 dispatcher_version_info->next = default_version_info;
37643 dispatcher_node->definition = 1;
37645 /* Set the dispatcher for all the versions. */
37646 it_v = default_version_info;
37647 while (it_v != NULL)
37649 it_v->dispatcher_resolver = dispatch_decl;
37650 it_v = it_v->next;
37653 else
37655 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37656 "multiversioning needs ifunc which is not supported "
37657 "on this target");
37659 #endif
37661 return dispatch_decl;
37664 /* Make the resolver function decl to dispatch the versions of a multi-
37665 versioned function, DEFAULT_DECL. Create an empty basic block in the
37666 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37667 function. */
37669 static tree
37670 make_resolver_func (const tree default_decl,
37671 const tree dispatch_decl,
37672 basic_block *empty_bb)
37674 /* Make the resolver function static. The resolver function returns
37675 void *. */
37676 tree decl_name = clone_function_name (default_decl, "resolver");
37677 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37678 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37679 tree decl = build_fn_decl (resolver_name, type);
37680 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37682 DECL_NAME (decl) = decl_name;
37683 TREE_USED (decl) = 1;
37684 DECL_ARTIFICIAL (decl) = 1;
37685 DECL_IGNORED_P (decl) = 0;
37686 TREE_PUBLIC (decl) = 0;
37687 DECL_UNINLINABLE (decl) = 1;
37689 /* Resolver is not external, body is generated. */
37690 DECL_EXTERNAL (decl) = 0;
37691 DECL_EXTERNAL (dispatch_decl) = 0;
37693 DECL_CONTEXT (decl) = NULL_TREE;
37694 DECL_INITIAL (decl) = make_node (BLOCK);
37695 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37697 /* Build result decl and add to function_decl. */
37698 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37699 DECL_ARTIFICIAL (t) = 1;
37700 DECL_IGNORED_P (t) = 1;
37701 DECL_RESULT (decl) = t;
37703 gimplify_function_tree (decl);
37704 push_cfun (DECL_STRUCT_FUNCTION (decl));
37705 *empty_bb = init_lowered_empty_function (decl, false,
37706 profile_count::uninitialized ());
37708 cgraph_node::add_new_function (decl, true);
37709 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37711 pop_cfun ();
37713 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37714 DECL_ATTRIBUTES (dispatch_decl)
37715 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37717 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37719 return decl;
37722 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37723 return a pointer to VERSION_DECL if we are running on a machine that
37724 supports the index CLONE_ISA hardware architecture bits. This function will
37725 be called during version dispatch to decide which function version to
37726 execute. It returns the basic block at the end, to which more conditions
37727 can be added. */
37729 static basic_block
37730 add_condition_to_bb (tree function_decl, tree version_decl,
37731 int clone_isa, basic_block new_bb)
37733 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37735 gcc_assert (new_bb != NULL);
37736 gimple_seq gseq = bb_seq (new_bb);
37739 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37740 build_fold_addr_expr (version_decl));
37741 tree result_var = create_tmp_var (ptr_type_node);
37742 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37743 gimple *return_stmt = gimple_build_return (result_var);
37745 if (clone_isa == CLONE_DEFAULT)
37747 gimple_seq_add_stmt (&gseq, convert_stmt);
37748 gimple_seq_add_stmt (&gseq, return_stmt);
37749 set_bb_seq (new_bb, gseq);
37750 gimple_set_bb (convert_stmt, new_bb);
37751 gimple_set_bb (return_stmt, new_bb);
37752 pop_cfun ();
37753 return new_bb;
37756 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37757 tree cond_var = create_tmp_var (bool_int_type_node);
37758 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37759 const char *arg_str = rs6000_clone_map[clone_isa].name;
37760 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37761 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37762 gimple_call_set_lhs (call_cond_stmt, cond_var);
37764 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37765 gimple_set_bb (call_cond_stmt, new_bb);
37766 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37768 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37769 NULL_TREE, NULL_TREE);
37770 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37771 gimple_set_bb (if_else_stmt, new_bb);
37772 gimple_seq_add_stmt (&gseq, if_else_stmt);
37774 gimple_seq_add_stmt (&gseq, convert_stmt);
37775 gimple_seq_add_stmt (&gseq, return_stmt);
37776 set_bb_seq (new_bb, gseq);
37778 basic_block bb1 = new_bb;
37779 edge e12 = split_block (bb1, if_else_stmt);
37780 basic_block bb2 = e12->dest;
37781 e12->flags &= ~EDGE_FALLTHRU;
37782 e12->flags |= EDGE_TRUE_VALUE;
37784 edge e23 = split_block (bb2, return_stmt);
37785 gimple_set_bb (convert_stmt, bb2);
37786 gimple_set_bb (return_stmt, bb2);
37788 basic_block bb3 = e23->dest;
37789 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37791 remove_edge (e23);
37792 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37794 pop_cfun ();
37795 return bb3;
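/* Chaining these blocks yields a resolver of roughly this shape (a
   C-level sketch only; the clone names such as "arch_3_00" come from
   rs6000_clone_map and are assumptions here):

     static void *f_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
         return (void *) f_power9;
       if (__builtin_cpu_supports ("arch_2_07"))
         return (void *) f_power8;
       return (void *) f_default;
     }
*/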
37798 /* This function generates the dispatch function for multi-versioned functions.
37799 DISPATCH_DECL is the function which will contain the dispatch logic.
37800 FNDECLS are the function choices for dispatch, and is a tree chain.
37801 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37802 code is generated. */
37804 static int
37805 dispatch_function_versions (tree dispatch_decl,
37806 void *fndecls_p,
37807 basic_block *empty_bb)
37809 int ix;
37810 tree ele;
37811 vec<tree> *fndecls;
37812 tree clones[CLONE_MAX];
37814 if (TARGET_DEBUG_TARGET)
37815 fputs ("dispatch_function_versions, top\n", stderr);
37817 gcc_assert (dispatch_decl != NULL
37818 && fndecls_p != NULL
37819 && empty_bb != NULL);
37821 /* fndecls_p is actually a vector. */
37822 fndecls = static_cast<vec<tree> *> (fndecls_p);
37824 /* At least one more version other than the default. */
37825 gcc_assert (fndecls->length () >= 2);
37827 /* The first version in the vector is the default decl. */
37828 memset ((void *) clones, '\0', sizeof (clones));
37829 clones[CLONE_DEFAULT] = (*fndecls)[0];
37831 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37832 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37833 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37834 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37835 to insert the code here to do the call. */
37837 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37839 int priority = rs6000_clone_priority (ele);
37840 if (!clones[priority])
37841 clones[priority] = ele;
37844 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37845 if (clones[ix])
37847 if (TARGET_DEBUG_TARGET)
37848 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37849 ix, get_decl_name (clones[ix]));
37851 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37852 *empty_bb);
37855 return 0;
37858 /* Generate the dispatching code body to dispatch multi-versioned function
37859 DECL. The target hook is called to process the "target" attributes and
37860 provide the code to dispatch the right function at run-time. NODE points
37861 to the dispatcher decl whose body will be created. */
37863 static tree
37864 rs6000_generate_version_dispatcher_body (void *node_p)
37866 tree resolver;
37867 basic_block empty_bb;
37868 struct cgraph_node *node = (cgraph_node *) node_p;
37869 struct cgraph_function_version_info *ninfo = node->function_version ();
37871 if (ninfo->dispatcher_resolver)
37872 return ninfo->dispatcher_resolver;
37874 /* node is going to be an alias, so remove the finalized bit. */
37875 node->definition = false;
37877 /* The first version in the chain corresponds to the default version. */
37878 ninfo->dispatcher_resolver = resolver
37879 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37881 if (TARGET_DEBUG_TARGET)
37882 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37883 get_decl_name (resolver));
37885 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37886 auto_vec<tree, 2> fn_ver_vec;
37888 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37889 vinfo;
37890 vinfo = vinfo->next)
37892 struct cgraph_node *version = vinfo->this_node;
37893 /* Check for virtual functions here again, as by this time it should
37894 have been determined if this function needs a vtable index or
37895 not. This happens for methods in derived classes that override
37896 virtual methods in base classes but are not explicitly marked as
37897 virtual. */
37898 if (DECL_VINDEX (version->decl))
37899 sorry ("virtual function multiversioning not supported");
37901 fn_ver_vec.safe_push (version->decl);
37904 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37905 cgraph_edge::rebuild_edges ();
37906 pop_cfun ();
37907 return resolver;
37911 /* Hook to determine if one function can safely inline another. */
37913 static bool
37914 rs6000_can_inline_p (tree caller, tree callee)
37916 bool ret = false;
37917 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37918 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37920 /* If callee has no option attributes, then it is ok to inline. */
37921 if (!callee_tree)
37922 ret = true;
37924 /* If caller has no option attributes, but callee does then it is not ok to
37925 inline. */
37926 else if (!caller_tree)
37927 ret = false;
37929 else
37931 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37932 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37934 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37935 can inline an altivec function but a non-vsx function can't inline a
37936 vsx function. */
37937 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37938 == callee_opts->x_rs6000_isa_flags)
37939 ret = true;
37942 if (TARGET_DEBUG_TARGET)
37943 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
37944 get_decl_name (caller), get_decl_name (callee),
37945 (ret ? "can" : "cannot"));
37947 return ret;
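/* Hypothetical example of the subset rule above: a caller compiled with
   -mcpu=power9 (VSX and Altivec enabled) may inline a callee declared
   __attribute__((target("altivec"))), since the callee's ISA flags are a
   subset of the caller's; the reverse direction is refused.  */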
37950 /* Allocate a stack temp and fix up the address so it meets the particular
37951 memory requirements (either offsettable or REG+REG addressing). */
37954 rs6000_allocate_stack_temp (machine_mode mode,
37955 bool offsettable_p,
37956 bool reg_reg_p)
37958 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37959 rtx addr = XEXP (stack, 0);
37960 int strict_p = reload_completed;
37962 if (!legitimate_indirect_address_p (addr, strict_p))
37964 if (offsettable_p
37965 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37966 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37968 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37969 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37972 return stack;
37975 /* Given a memory reference, if it does not use reg or reg+reg addressing,
37976 convert it to such a form to deal with memory reference instructions like
37977 STFIWX that only take reg+reg addressing. */
37980 rs6000_address_for_fpconvert (rtx x)
37982 rtx addr;
37984 gcc_assert (MEM_P (x));
37985 addr = XEXP (x, 0);
37986 if (can_create_pseudo_p ()
37987 && ! legitimate_indirect_address_p (addr, reload_completed)
37988 && ! legitimate_indexed_address_p (addr, reload_completed))
37990 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37992 rtx reg = XEXP (addr, 0);
37993 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37994 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37995 gcc_assert (REG_P (reg));
37996 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37997 addr = reg;
37999 else if (GET_CODE (addr) == PRE_MODIFY)
38001 rtx reg = XEXP (addr, 0);
38002 rtx expr = XEXP (addr, 1);
38003 gcc_assert (REG_P (reg));
38004 gcc_assert (GET_CODE (expr) == PLUS);
38005 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
38006 addr = reg;
38009 x = replace_equiv_address (x, copy_addr_to_reg (addr));
38012 return x;
38015 /* Given a memory reference, if it is not in the form for altivec memory
38016 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
38017 convert to the altivec format. */
38020 rs6000_address_for_altivec (rtx x)
38022 gcc_assert (MEM_P (x));
38023 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
38025 rtx addr = XEXP (x, 0);
38027 if (!legitimate_indexed_address_p (addr, reload_completed)
38028 && !legitimate_indirect_address_p (addr, reload_completed))
38029 addr = copy_to_mode_reg (Pmode, addr);
38031 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
38032 x = change_address (x, GET_MODE (x), addr);
38035 return x;
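/* Sketch of the transformation above for a hypothetical misaligned
   reference *(ptr + 13):

     addr = ptr + 13;          forced into a register
     addr = addr & -16;        masked down to a 16-byte boundary

   matching lvx/stvx, which ignore the low four bits of the address.  */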
38038 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
38040 On the RS/6000, all integer constants are acceptable, most won't be valid
38041 for particular insns, though. Only easy FP constants are acceptable. */
38043 static bool
38044 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
38046 if (TARGET_ELF && tls_referenced_p (x))
38047 return false;
38049 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
38050 || GET_MODE (x) == VOIDmode
38051 || (TARGET_POWERPC64 && mode == DImode)
38052 || easy_fp_constant (x, mode)
38053 || easy_vector_constant (x, mode));
38057 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
38059 static bool
38060 chain_already_loaded (rtx_insn *last)
38062 for (; last != NULL; last = PREV_INSN (last))
38064 if (NONJUMP_INSN_P (last))
38066 rtx patt = PATTERN (last);
38068 if (GET_CODE (patt) == SET)
38070 rtx lhs = XEXP (patt, 0);
38072 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
38073 return true;
38077 return false;
38080 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
38082 void
38083 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38085 const bool direct_call_p
38086 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
38087 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
38088 rtx toc_load = NULL_RTX;
38089 rtx toc_restore = NULL_RTX;
38090 rtx func_addr;
38091 rtx abi_reg = NULL_RTX;
38092 rtx call[4];
38093 int n_call;
38094 rtx insn;
38096 /* Handle longcall attributes. */
38097 if (INTVAL (cookie) & CALL_LONG)
38098 func_desc = rs6000_longcall_ref (func_desc);
38100 /* Handle indirect calls. */
38101 if (GET_CODE (func_desc) != SYMBOL_REF
38102 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
38104 /* Save the TOC into its reserved slot before the call,
38105 and prepare to restore it after the call. */
38106 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
38107 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
38108 rtx stack_toc_mem = gen_frame_mem (Pmode,
38109 gen_rtx_PLUS (Pmode, stack_ptr,
38110 stack_toc_offset));
38111 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
38112 gen_rtvec (1, stack_toc_offset),
38113 UNSPEC_TOCSLOT);
38114 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
38116 /* Can we optimize saving the TOC in the prologue or
38117 do we need to do it at every call? */
38118 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
38119 cfun->machine->save_toc_in_prologue = true;
38120 else
38122 MEM_VOLATILE_P (stack_toc_mem) = 1;
38123 emit_move_insn (stack_toc_mem, toc_reg);
38126 if (DEFAULT_ABI == ABI_ELFv2)
38128 /* A function pointer in the ELFv2 ABI is just a plain address, but
38129 the ABI requires it to be loaded into r12 before the call. */
38130 func_addr = gen_rtx_REG (Pmode, 12);
38131 emit_move_insn (func_addr, func_desc);
38132 abi_reg = func_addr;
38134 else
38136 /* A function pointer under AIX is a pointer to a data area whose
38137 first word contains the actual address of the function, whose
38138 second word contains a pointer to its TOC, and whose third word
38139 contains a value to place in the static chain register (r11).
38140 Note that if we load the static chain, our "trampoline" need
38141 not have any executable code. */
38143 /* Load up address of the actual function. */
38144 func_desc = force_reg (Pmode, func_desc);
38145 func_addr = gen_reg_rtx (Pmode);
38146 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
38148 /* Prepare to load the TOC of the called function. Note that the
38149 TOC load must happen immediately before the actual call so
38150 that unwinding the TOC registers works correctly. See the
38151 comment in frob_update_context. */
38152 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
38153 rtx func_toc_mem = gen_rtx_MEM (Pmode,
38154 gen_rtx_PLUS (Pmode, func_desc,
38155 func_toc_offset));
38156 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
38158 /* If we have a static chain, load it up. But, if the call was
38159 originally direct, the 3rd word has not been written since no
38160 trampoline has been built, so we ought not to load it, lest we
38161 override a static chain value. */
38162 if (!direct_call_p
38163 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
38164 && !chain_already_loaded (get_current_sequence ()->next->last))
38166 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
38167 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
38168 rtx func_sc_mem = gen_rtx_MEM (Pmode,
38169 gen_rtx_PLUS (Pmode, func_desc,
38170 func_sc_offset));
38171 emit_move_insn (sc_reg, func_sc_mem);
38172 abi_reg = sc_reg;
38176 else
38178 /* Direct calls use the TOC: for local calls, the callee will
38179 assume the TOC register is set; for non-local calls, the
38180 PLT stub needs the TOC register. */
38181 abi_reg = toc_reg;
38182 func_addr = func_desc;
38185 /* Create the call. */
38186 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
38187 if (value != NULL_RTX)
38188 call[0] = gen_rtx_SET (value, call[0]);
38189 n_call = 1;
38191 if (toc_load)
38192 call[n_call++] = toc_load;
38193 if (toc_restore)
38194 call[n_call++] = toc_restore;
38196 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38198 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38199 insn = emit_call_insn (insn);
38201 /* Mention all registers defined by the ABI to hold information
38202 as uses in CALL_INSN_FUNCTION_USAGE. */
38203 if (abi_reg)
38204 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
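/* The AIX function descriptor dereferenced above has this layout (an
   expository C sketch, not a declaration used by the compiler):

     struct aix_func_desc {
       void *entry;           word 0: address of the function's code
       void *toc;             word 1: the function's TOC pointer
       void *static_chain;    word 2: value to load into r11
     };
*/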
38207 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38209 void
38210 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38212 rtx call[2];
38213 rtx insn;
38215 gcc_assert (INTVAL (cookie) == 0);
38217 /* Create the call. */
38218 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
38219 if (value != NULL_RTX)
38220 call[0] = gen_rtx_SET (value, call[0]);
38222 call[1] = simple_return_rtx;
38224 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38225 insn = emit_call_insn (insn);
38227 /* Note use of the TOC register. */
38228 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38231 /* Return whether we need to always update the saved TOC pointer when we update
38232 the stack pointer. */
38234 static bool
38235 rs6000_save_toc_in_prologue_p (void)
38237 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38240 #ifdef HAVE_GAS_HIDDEN
38241 # define USE_HIDDEN_LINKONCE 1
38242 #else
38243 # define USE_HIDDEN_LINKONCE 0
38244 #endif
38246 /* Fills in the label name that should be used for a 476 link stack thunk. */
38248 void
38249 get_ppc476_thunk_name (char name[32])
38251 gcc_assert (TARGET_LINK_STACK);
38253 if (USE_HIDDEN_LINKONCE)
38254 sprintf (name, "__ppc476.get_thunk");
38255 else
38256 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38259 /* This function emits the simple thunk routine that is used to preserve
38260 the link stack on the 476 cpu. */
38262 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38263 static void
38264 rs6000_code_end (void)
38266 char name[32];
38267 tree decl;
38269 if (!TARGET_LINK_STACK)
38270 return;
38272 get_ppc476_thunk_name (name);
38274 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38275 build_function_type_list (void_type_node, NULL_TREE));
38276 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38277 NULL_TREE, void_type_node);
38278 TREE_PUBLIC (decl) = 1;
38279 TREE_STATIC (decl) = 1;
38281 #if RS6000_WEAK
38282 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38284 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38285 targetm.asm_out.unique_section (decl, 0);
38286 switch_to_section (get_named_section (decl, NULL, 0));
38287 DECL_WEAK (decl) = 1;
38288 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38289 targetm.asm_out.globalize_label (asm_out_file, name);
38290 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38291 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38293 else
38294 #endif
38296 switch_to_section (text_section);
38297 ASM_OUTPUT_LABEL (asm_out_file, name);
38300 DECL_INITIAL (decl) = make_node (BLOCK);
38301 current_function_decl = decl;
38302 allocate_struct_function (decl, false);
38303 init_function_start (decl);
38304 first_function_block_is_cold = false;
38305 /* Make sure unwind info is emitted for the thunk if needed. */
38306 final_start_function (emit_barrier (), asm_out_file, 1);
38308 fputs ("\tblr\n", asm_out_file);
38310 final_end_function ();
38311 init_insn_lengths ();
38312 free_after_compilation (cfun);
38313 set_cfun (NULL);
38314 current_function_decl = NULL;
38317 /* Add r30 to hard reg set if the prologue sets it up and it is not
38318 pic_offset_table_rtx. */
38320 static void
38321 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38323 if (!TARGET_SINGLE_PIC_BASE
38324 && TARGET_TOC
38325 && TARGET_MINIMAL_TOC
38326 && !constant_pool_empty_p ())
38327 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38328 if (cfun->machine->split_stack_argp_used)
38329 add_to_hard_reg_set (&set->set, Pmode, 12);
38331 /* Make sure the hard reg set doesn't include r2, which was possibly added
38332 via PIC_OFFSET_TABLE_REGNUM. */
38333 if (TARGET_TOC)
38334 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38338 /* Helper function for rs6000_split_logical to emit a logical instruction after
38339 splitting the operation into single GPR registers.
38341 DEST is the destination register.
38342 OP1 and OP2 are the input source registers.
38343 CODE is the base operation (AND, IOR, XOR, NOT).
38344 MODE is the machine mode.
38345 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38346 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38347 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38349 static void
38350 rs6000_split_logical_inner (rtx dest,
38351 rtx op1,
38352 rtx op2,
38353 enum rtx_code code,
38354 machine_mode mode,
38355 bool complement_final_p,
38356 bool complement_op1_p,
38357 bool complement_op2_p)
38359 rtx bool_rtx;
38361 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38362 if (op2 && GET_CODE (op2) == CONST_INT
38363 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38364 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38366 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38367 HOST_WIDE_INT value = INTVAL (op2) & mask;
38369 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38370 if (code == AND)
38372 if (value == 0)
38374 emit_insn (gen_rtx_SET (dest, const0_rtx));
38375 return;
38378 else if (value == mask)
38380 if (!rtx_equal_p (dest, op1))
38381 emit_insn (gen_rtx_SET (dest, op1));
38382 return;
38386 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38387 into separate ORI/ORIS or XORI/XORIS instructions. */
38388 else if (code == IOR || code == XOR)
38390 if (value == 0)
38392 if (!rtx_equal_p (dest, op1))
38393 emit_insn (gen_rtx_SET (dest, op1));
38394 return;
38399 if (code == AND && mode == SImode
38400 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38402 emit_insn (gen_andsi3 (dest, op1, op2));
38403 return;
38406 if (complement_op1_p)
38407 op1 = gen_rtx_NOT (mode, op1);
38409 if (complement_op2_p)
38410 op2 = gen_rtx_NOT (mode, op2);
38412 /* For canonical RTL, if only one arm is inverted it is the first. */
38413 if (!complement_op1_p && complement_op2_p)
38414 std::swap (op1, op2);
38416 bool_rtx = ((code == NOT)
38417 ? gen_rtx_NOT (mode, op1)
38418 : gen_rtx_fmt_ee (code, mode, op1, op2));
38420 if (complement_final_p)
38421 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38423 emit_insn (gen_rtx_SET (dest, bool_rtx));
38426 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38427 operations are split immediately during RTL generation to allow for more
38428 optimizations of the AND/IOR/XOR.
38430 OPERANDS is an array containing the destination and two input operands.
38431 CODE is the base operation (AND, IOR, XOR, NOT).
38432 MODE is the machine mode.
38433 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38434 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38435 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
38436 CLOBBER_REG is either NULL or a scratch register of type CC to allow
38437 formation of the AND instructions. */
38439 static void
38440 rs6000_split_logical_di (rtx operands[3],
38441 enum rtx_code code,
38442 bool complement_final_p,
38443 bool complement_op1_p,
38444 bool complement_op2_p)
38446 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38447 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38448 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38449 enum hi_lo { hi = 0, lo = 1 };
38450 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38451 size_t i;
38453 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38454 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38455 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38456 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38458 if (code == NOT)
38459 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38460 else
38462 if (GET_CODE (operands[2]) != CONST_INT)
38464 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38465 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38467 else
38469 HOST_WIDE_INT value = INTVAL (operands[2]);
38470 HOST_WIDE_INT value_hi_lo[2];
38472 gcc_assert (!complement_final_p);
38473 gcc_assert (!complement_op1_p);
38474 gcc_assert (!complement_op2_p);
38476 value_hi_lo[hi] = value >> 32;
38477 value_hi_lo[lo] = value & lower_32bits;
38479 for (i = 0; i < 2; i++)
38481 HOST_WIDE_INT sub_value = value_hi_lo[i];
38483 if (sub_value & sign_bit)
38484 sub_value |= upper_32bits;
38486 op2_hi_lo[i] = GEN_INT (sub_value);
38488 /* If this is an AND instruction, check to see if we need to load
38489 the value into a register. */
38490 if (code == AND && sub_value != -1 && sub_value != 0
38491 && !and_operand (op2_hi_lo[i], SImode))
38492 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38497 for (i = 0; i < 2; i++)
38499 /* Split large IOR/XOR operations. */
38500 if ((code == IOR || code == XOR)
38501 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38502 && !complement_final_p
38503 && !complement_op1_p
38504 && !complement_op2_p
38505 && !logical_const_operand (op2_hi_lo[i], SImode))
38507 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38508 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38509 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38510 rtx tmp = gen_reg_rtx (SImode);
38512 /* Make sure the constant is sign extended. */
38513 if ((hi_16bits & sign_bit) != 0)
38514 hi_16bits |= upper_32bits;
38516 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38517 code, SImode, false, false, false);
38519 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38520 code, SImode, false, false, false);
38522 else
38523 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38524 code, SImode, complement_final_p,
38525 complement_op1_p, complement_op2_p);
38528 return;
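/* Illustrative example (a sketch, not from the original source): on a
   32-bit target a DImode IOR of a register with 0x12345678 splits into
   SImode halves.  The high half is an IOR with 0, i.e. a move, and the
   low constant is not a 16-bit logical immediate, so it is further split
   through a temporary (hypothetical registers):

       mr    r3,r5          # high half
       oris  r0,r6,0x1234   # low half, upper 16 bits
       ori   r4,r0,0x5678   # low half, lower 16 bits  */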
38531 /* Split the insns that make up boolean operations operating on multiple GPR
38532 registers. The boolean MD patterns ensure that the inputs either are
38533 exactly the same as the output registers, or there is no overlap.
38535 OPERANDS is an array containing the destination and two input operands.
38536 CODE is the base operation (AND, IOR, XOR, NOT).
38537 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38538 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38539 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38541 void
38542 rs6000_split_logical (rtx operands[3],
38543 enum rtx_code code,
38544 bool complement_final_p,
38545 bool complement_op1_p,
38546 bool complement_op2_p)
38548 machine_mode mode = GET_MODE (operands[0]);
38549 machine_mode sub_mode;
38550 rtx op0, op1, op2;
38551 int sub_size, regno0, regno1, nregs, i;
38553 /* If this is DImode, use the specialized version that can run before
38554 register allocation. */
38555 if (mode == DImode && !TARGET_POWERPC64)
38557 rs6000_split_logical_di (operands, code, complement_final_p,
38558 complement_op1_p, complement_op2_p);
38559 return;
38562 op0 = operands[0];
38563 op1 = operands[1];
38564 op2 = (code == NOT) ? NULL_RTX : operands[2];
38565 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38566 sub_size = GET_MODE_SIZE (sub_mode);
38567 regno0 = REGNO (op0);
38568 regno1 = REGNO (op1);
38570 gcc_assert (reload_completed);
38571 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38572 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38574 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38575 gcc_assert (nregs > 1);
38577 if (op2 && REG_P (op2))
38578 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38580 for (i = 0; i < nregs; i++)
38582 int offset = i * sub_size;
38583 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38584 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38585 rtx sub_op2 = ((code == NOT)
38586 ? NULL_RTX
38587 : simplify_subreg (sub_mode, op2, mode, offset));
38589 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38590 complement_final_p, complement_op1_p,
38591 complement_op2_p);
38594 return;
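/* Illustrative example (a sketch, not from the original source): after
   reload, a TImode XOR on a 64-bit target occupies two GPRs, so the loop
   above emits one DImode XOR per register (hypothetical registers):

       xor r10,r6,r8
       xor r11,r7,r9  */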
38598 /* Return true if the peephole2 pass can combine an addis instruction with a
38599 D-form load whose offset allows the two instructions to be fused together
38600 on a power8. */
38602 bool
38603 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38604 rtx addis_value, /* addis value. */
38605 rtx target, /* target register that is loaded. */
38606 rtx mem) /* bottom part of the memory addr. */
38608 rtx addr;
38609 rtx base_reg;
38611 /* Validate arguments. */
38612 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38613 return false;
38615 if (!base_reg_operand (target, GET_MODE (target)))
38616 return false;
38618 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38619 return false;
38621 /* Allow sign/zero extension. */
38622 if (GET_CODE (mem) == ZERO_EXTEND
38623 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38624 mem = XEXP (mem, 0);
38626 if (!MEM_P (mem))
38627 return false;
38629 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38630 return false;
38632 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38633 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38634 return false;
38636 /* Validate that the register used to load the high value is either the
38637 register being loaded, or we can safely replace its use.
38639 This function is only called from the peephole2 pass and we assume that
38640 there are 2 instructions in the peephole (addis and load), so we want to
38641 check that the target register is not used in the memory address and that
38642 the register holding the addis result is dead after the peephole. */
38643 if (REGNO (addis_reg) != REGNO (target))
38645 if (reg_mentioned_p (target, mem))
38646 return false;
38648 if (!peep2_reg_dead_p (2, addis_reg))
38649 return false;
38651 /* If the target register being loaded is the stack pointer, we must
38652 avoid loading any other value into it, even temporarily. */
38653 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38654 return false;
38657 base_reg = XEXP (addr, 0);
38658 return REGNO (addis_reg) == REGNO (base_reg);
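/* Illustrative example (a sketch, not from the original source) of the
   two-insn peephole shape that the checks above accept, with hypothetical
   registers:

       addis r9,r2,sym@toc@ha
       lwz   r9,sym@toc@l(r9)

   Here the addis target, the load base, and the loaded register are all
   the same; a different addis register is also allowed provided it is
   dead after the pair and the target is not used in the address. */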
38661 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38662 sequence. We adjust the addis register to use the target register. If the
38663 load sign extends, we rewrite it as a zero extending load followed by an
38664 explicit sign extension, since the fusion only covers zero extending
38665 loads.
38667 The operands are:
38668 operands[0] register set with addis (to be replaced with target)
38669 operands[1] value set via addis
38670 operands[2] target register being loaded
38671 operands[3] D-form memory reference using operands[0]. */
38673 void
38674 expand_fusion_gpr_load (rtx *operands)
38676 rtx addis_value = operands[1];
38677 rtx target = operands[2];
38678 rtx orig_mem = operands[3];
38679 rtx new_addr, new_mem, orig_addr, offset;
38680 enum rtx_code plus_or_lo_sum;
38681 machine_mode target_mode = GET_MODE (target);
38682 machine_mode extend_mode = target_mode;
38683 machine_mode ptr_mode = Pmode;
38684 enum rtx_code extend = UNKNOWN;
38686 if (GET_CODE (orig_mem) == ZERO_EXTEND
38687 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38689 extend = GET_CODE (orig_mem);
38690 orig_mem = XEXP (orig_mem, 0);
38691 target_mode = GET_MODE (orig_mem);
38694 gcc_assert (MEM_P (orig_mem));
38696 orig_addr = XEXP (orig_mem, 0);
38697 plus_or_lo_sum = GET_CODE (orig_addr);
38698 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38700 offset = XEXP (orig_addr, 1);
38701 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38702 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38704 if (extend != UNKNOWN)
38705 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38707 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38708 UNSPEC_FUSION_GPR);
38709 emit_insn (gen_rtx_SET (target, new_mem));
38711 if (extend == SIGN_EXTEND)
38713 int sub_off = ((BYTES_BIG_ENDIAN)
38714 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38715 : 0);
38716 rtx sign_reg
38717 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38719 emit_insn (gen_rtx_SET (target,
38720 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38723 return;
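/* Illustrative example (a sketch, not from the original source): since
   power8 fusion only covers zero extending loads, a fused sign extending
   HImode load ends up, after the rewrite above, as roughly (hypothetical
   registers):

       addis r9,r2,sym@toc@ha
       lhz   r9,sym@toc@l(r9)
       extsh r9,r9  */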
38726 /* Emit the addis instruction that will be part of a fused instruction
38727 sequence. */
38729 void
38730 emit_fusion_addis (rtx target, rtx addis_value)
38732 rtx fuse_ops[10];
38733 const char *addis_str = NULL;
38735 /* Emit the addis instruction. */
38736 fuse_ops[0] = target;
38737 if (satisfies_constraint_L (addis_value))
38739 fuse_ops[1] = addis_value;
38740 addis_str = "lis %0,%v1";
38743 else if (GET_CODE (addis_value) == PLUS)
38745 rtx op0 = XEXP (addis_value, 0);
38746 rtx op1 = XEXP (addis_value, 1);
38748 if (REG_P (op0) && CONST_INT_P (op1)
38749 && satisfies_constraint_L (op1))
38751 fuse_ops[1] = op0;
38752 fuse_ops[2] = op1;
38753 addis_str = "addis %0,%1,%v2";
38757 else if (GET_CODE (addis_value) == HIGH)
38759 rtx value = XEXP (addis_value, 0);
38760 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38762 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38763 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38764 if (TARGET_ELF)
38765 addis_str = "addis %0,%2,%1@toc@ha";
38767 else if (TARGET_XCOFF)
38768 addis_str = "addis %0,%1@u(%2)";
38770 else
38771 gcc_unreachable ();
38774 else if (GET_CODE (value) == PLUS)
38776 rtx op0 = XEXP (value, 0);
38777 rtx op1 = XEXP (value, 1);
38779 if (GET_CODE (op0) == UNSPEC
38780 && XINT (op0, 1) == UNSPEC_TOCREL
38781 && CONST_INT_P (op1))
38783 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38784 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38785 fuse_ops[3] = op1;
38786 if (TARGET_ELF)
38787 addis_str = "addis %0,%2,%1+%3@toc@ha";
38789 else if (TARGET_XCOFF)
38790 addis_str = "addis %0,%1+%3@u(%2)";
38792 else
38793 gcc_unreachable ();
38797 else if (satisfies_constraint_L (value))
38799 fuse_ops[1] = value;
38800 addis_str = "lis %0,%v1";
38803 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38805 fuse_ops[1] = value;
38806 addis_str = "lis %0,%1@ha";
38810 if (!addis_str)
38811 fatal_insn ("Could not generate addis value for fusion", addis_value);
38813 output_asm_insn (addis_str, fuse_ops);
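/* Illustrative summary (not from the original source) of the main cases
   handled above, showing the ELF strings; the operands are hypothetical:

       addis_value                           emitted asm
       (const_int in L range)                lis   %0,%v1
       (plus (reg) (const_int in L range))   addis %0,%1,%v2
       (high (unspec [sym toc] TOCREL))      addis %0,%2,%1@toc@ha  */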
38816 /* Emit a D-form load or store instruction that is the second instruction
38817 of a fusion sequence. */
38819 void
38820 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38821 const char *insn_str)
38823 rtx fuse_ops[10];
38824 char insn_template[80];
38826 fuse_ops[0] = load_store_reg;
38827 fuse_ops[1] = addis_reg;
38829 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38831 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38832 fuse_ops[2] = offset;
38833 output_asm_insn (insn_template, fuse_ops);
38836 else if (GET_CODE (offset) == UNSPEC
38837 && XINT (offset, 1) == UNSPEC_TOCREL)
38839 if (TARGET_ELF)
38840 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38842 else if (TARGET_XCOFF)
38843 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38845 else
38846 gcc_unreachable ();
38848 fuse_ops[2] = XVECEXP (offset, 0, 0);
38849 output_asm_insn (insn_template, fuse_ops);
38852 else if (GET_CODE (offset) == PLUS
38853 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38854 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38855 && CONST_INT_P (XEXP (offset, 1)))
38857 rtx tocrel_unspec = XEXP (offset, 0);
38858 if (TARGET_ELF)
38859 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38861 else if (TARGET_XCOFF)
38862 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38864 else
38865 gcc_unreachable ();
38867 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38868 fuse_ops[3] = XEXP (offset, 1);
38869 output_asm_insn (insn_template, fuse_ops);
38872 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38874 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38876 fuse_ops[2] = offset;
38877 output_asm_insn (insn_template, fuse_ops);
38880 else
38881 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38883 return;
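/* Illustrative example (not from the original source): called with
   insn_str "lwz" and a constant offset satisfying constraint I, the code
   above prints the template "lwz %0,%2(%1)", which might render as

       lwz r10,8(r9)

   for hypothetical operands. */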
38886 /* Wrap a TOC address that can be fused to indicate that special fusion
38887 processing is needed. */
38889 static rtx
38890 fusion_wrap_memory_address (rtx old_mem)
38892 rtx old_addr = XEXP (old_mem, 0);
38893 rtvec v = gen_rtvec (1, old_addr);
38894 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38895 return replace_equiv_address_nv (old_mem, new_addr, false);
38898 /* Given an address, convert it into the addis and load offset parts. Addresses
38899 created during the peephole2 process look like:
38900 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38901 (unspec [(...)] UNSPEC_TOCREL))
38903 Addresses created via toc fusion look like:
38904 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38906 static void
38907 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38909 rtx hi, lo;
38911 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38913 lo = XVECEXP (addr, 0, 0);
38914 hi = gen_rtx_HIGH (Pmode, lo);
38916 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38918 hi = XEXP (addr, 0);
38919 lo = XEXP (addr, 1);
38921 else
38922 gcc_unreachable ();
38924 *p_hi = hi;
38925 *p_lo = lo;
38928 /* Return a string to fuse an addis instruction with a GPR load into the same
38929 register that the addis instruction set. The address that is used
38930 is the logical address that was formed during peephole2:
38931 (lo_sum (high) (low-part))
38933 Or the address is the TOC address that is wrapped before register allocation:
38934 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38936 The code is complicated, so we call output_asm_insn directly, and just
38937 return "". */
38939 const char *
38940 emit_fusion_gpr_load (rtx target, rtx mem)
38942 rtx addis_value;
38943 rtx addr;
38944 rtx load_offset;
38945 const char *load_str = NULL;
38946 machine_mode mode;
38948 if (GET_CODE (mem) == ZERO_EXTEND)
38949 mem = XEXP (mem, 0);
38951 gcc_assert (REG_P (target) && MEM_P (mem));
38953 addr = XEXP (mem, 0);
38954 fusion_split_address (addr, &addis_value, &load_offset);
38956 /* Now emit the load instruction to the same register. */
38957 mode = GET_MODE (mem);
38958 switch (mode)
38960 case E_QImode:
38961 load_str = "lbz";
38962 break;
38964 case E_HImode:
38965 load_str = "lhz";
38966 break;
38968 case E_SImode:
38969 case E_SFmode:
38970 load_str = "lwz";
38971 break;
38973 case E_DImode:
38974 case E_DFmode:
38975 gcc_assert (TARGET_POWERPC64);
38976 load_str = "ld";
38977 break;
38979 default:
38980 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38983 /* Emit the addis instruction. */
38984 emit_fusion_addis (target, addis_value);
38986 /* Emit the D-form load instruction. */
38987 emit_fusion_load_store (target, target, load_offset, load_str);
38989 return "";
38993 /* Return true if the peephole2 pass can combine an addis instruction with a
38994 memory operation (load or store). This form of fusion was added in the
38995 ISA 3.0 (power9) hardware. */
38997 bool
38998 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38999 rtx addis_value, /* addis value. */
39000 rtx dest, /* destination (memory or register). */
39001 rtx src) /* source (register or memory). */
39003 rtx addr, mem, offset;
39004 machine_mode mode = GET_MODE (src);
39006 /* Validate arguments. */
39007 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
39008 return false;
39010 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
39011 return false;
39013 /* Ignore extend operations that are part of the load. */
39014 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
39015 src = XEXP (src, 0);
39017 /* Test for memory<-register or register<-memory. */
39018 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
39020 if (!MEM_P (dest))
39021 return false;
39023 mem = dest;
39026 else if (MEM_P (src))
39028 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
39029 return false;
39031 mem = src;
39034 else
39035 return false;
39037 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
39038 if (GET_CODE (addr) == PLUS)
39040 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
39041 return false;
39043 return satisfies_constraint_I (XEXP (addr, 1));
39046 else if (GET_CODE (addr) == LO_SUM)
39048 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
39049 return false;
39051 offset = XEXP (addr, 1);
39052 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
39053 return small_toc_ref (offset, GET_MODE (offset));
39055 else if (TARGET_ELF && !TARGET_POWERPC64)
39056 return CONSTANT_P (offset);
39059 return false;
39062 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39063 load sequence.
39065 The operands are:
39066 operands[0] register set with addis
39067 operands[1] value set via addis
39068 operands[2] target register being loaded
39069 operands[3] D-form memory reference using operands[0].
39071 This is similar to the fusion introduced with power8, except it scales to
39072 both loads/stores and does not require the result register to be the same as
39073 the base register. At the moment, we only do this if the register set by the
39074 addis is dead. */
39076 void
39077 expand_fusion_p9_load (rtx *operands)
39079 rtx tmp_reg = operands[0];
39080 rtx addis_value = operands[1];
39081 rtx target = operands[2];
39082 rtx orig_mem = operands[3];
39083 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
39084 enum rtx_code plus_or_lo_sum;
39085 machine_mode target_mode = GET_MODE (target);
39086 machine_mode extend_mode = target_mode;
39087 machine_mode ptr_mode = Pmode;
39088 enum rtx_code extend = UNKNOWN;
39090 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
39092 extend = GET_CODE (orig_mem);
39093 orig_mem = XEXP (orig_mem, 0);
39094 target_mode = GET_MODE (orig_mem);
39097 gcc_assert (MEM_P (orig_mem));
39099 orig_addr = XEXP (orig_mem, 0);
39100 plus_or_lo_sum = GET_CODE (orig_addr);
39101 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39103 offset = XEXP (orig_addr, 1);
39104 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39105 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39107 if (extend != UNKNOWN)
39108 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
39110 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
39111 UNSPEC_FUSION_P9);
39113 set = gen_rtx_SET (target, new_mem);
39114 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39115 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39116 emit_insn (insn);
39118 return;
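/* Illustrative sketch (not from the original source) of the insn built
   above, with hypothetical modes and registers:

       (parallel [(set (reg:DI target)
                       (unspec:DI [(mem:DI (plus ...))] UNSPEC_FUSION_P9))
                  (clobber (reg:DI tmp))])

   The clobber makes the use of the temporary addis register explicit so
   that later passes do not assume it still holds its old value. */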
39121 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39122 store sequence.
39124 The operands are:
39125 operands[0] register set with addis
39126 operands[1] value set via addis
39127 operands[2] target D-form memory being stored to
39128 operands[3] register being stored
39130 This is similar to the fusion introduced with power8, except it scales to
39131 both loads/stores and does not require the result register to be the same as
39132 the base register. At the moment, we only do this if register set with addis
39133 is dead. */
39135 void
39136 expand_fusion_p9_store (rtx *operands)
39138 rtx tmp_reg = operands[0];
39139 rtx addis_value = operands[1];
39140 rtx orig_mem = operands[2];
39141 rtx src = operands[3];
39142 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
39143 enum rtx_code plus_or_lo_sum;
39144 machine_mode target_mode = GET_MODE (orig_mem);
39145 machine_mode ptr_mode = Pmode;
39147 gcc_assert (MEM_P (orig_mem));
39149 orig_addr = XEXP (orig_mem, 0);
39150 plus_or_lo_sum = GET_CODE (orig_addr);
39151 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39153 offset = XEXP (orig_addr, 1);
39154 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39155 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39157 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
39158 UNSPEC_FUSION_P9);
39160 set = gen_rtx_SET (new_mem, new_src);
39161 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39162 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39163 emit_insn (insn);
39165 return;
39168 /* Return a string to fuse an addis instruction with a load using extended
39169 fusion. The address that is used is the logical address that was formed
39170 during peephole2: (lo_sum (high) (low-part))
39172 The code is complicated, so we call output_asm_insn directly, and just
39173 return "". */
39175 const char *
39176 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
39178 machine_mode mode = GET_MODE (reg);
39179 rtx hi;
39180 rtx lo;
39181 rtx addr;
39182 const char *load_string;
39183 int r;
39185 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
39187 mem = XEXP (mem, 0);
39188 mode = GET_MODE (mem);
39191 if (GET_CODE (reg) == SUBREG)
39193 gcc_assert (SUBREG_BYTE (reg) == 0);
39194 reg = SUBREG_REG (reg);
39197 if (!REG_P (reg))
39198 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
39200 r = REGNO (reg);
39201 if (FP_REGNO_P (r))
39203 if (mode == SFmode)
39204 load_string = "lfs";
39205 else if (mode == DFmode || mode == DImode)
39206 load_string = "lfd";
39207 else
39208 gcc_unreachable ();
39210 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39212 if (mode == SFmode)
39213 load_string = "lxssp";
39214 else if (mode == DFmode || mode == DImode)
39215 load_string = "lxsd";
39216 else
39217 gcc_unreachable ();
39219 else if (INT_REGNO_P (r))
39221 switch (mode)
39223 case E_QImode:
39224 load_string = "lbz";
39225 break;
39226 case E_HImode:
39227 load_string = "lhz";
39228 break;
39229 case E_SImode:
39230 case E_SFmode:
39231 load_string = "lwz";
39232 break;
39233 case E_DImode:
39234 case E_DFmode:
39235 if (!TARGET_POWERPC64)
39236 gcc_unreachable ();
39237 load_string = "ld";
39238 break;
39239 default:
39240 gcc_unreachable ();
39243 else
39244 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
39246 if (!MEM_P (mem))
39247 fatal_insn ("emit_fusion_p9_load not MEM", mem);
39249 addr = XEXP (mem, 0);
39250 fusion_split_address (addr, &hi, &lo);
39252 /* Emit the addis instruction. */
39253 emit_fusion_addis (tmp_reg, hi);
39255 /* Emit the D-form load instruction. */
39256 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
39258 return "";
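/* Illustrative example (not from the original source) of the output for a
   DFmode load into an FPR, with hypothetical registers:

       addis r9,r2,sym@toc@ha
       lfd   f1,sym@toc@l(r9)

   Unlike the power8 GPR fusion, the loaded register does not have to be
   the addis target. */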
39261 /* Return a string to fuse an addis instruction with a store using extended
39262 fusion. The address that is used is the logical address that was formed
39263 during peephole2: (lo_sum (high) (low-part))
39265 The code is complicated, so we call output_asm_insn directly, and just
39266 return "". */
39268 const char *
39269 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
39271 machine_mode mode = GET_MODE (reg);
39272 rtx hi;
39273 rtx lo;
39274 rtx addr;
39275 const char *store_string;
39276 int r;
39278 if (GET_CODE (reg) == SUBREG)
39280 gcc_assert (SUBREG_BYTE (reg) == 0);
39281 reg = SUBREG_REG (reg);
39284 if (!REG_P (reg))
39285 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
39287 r = REGNO (reg);
39288 if (FP_REGNO_P (r))
39290 if (mode == SFmode)
39291 store_string = "stfs";
39292 else if (mode == DFmode)
39293 store_string = "stfd";
39294 else
39295 gcc_unreachable ();
39297 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39299 if (mode == SFmode)
39300 store_string = "stxssp";
39301 else if (mode == DFmode || mode == DImode)
39302 store_string = "stxsd";
39303 else
39304 gcc_unreachable ();
39306 else if (INT_REGNO_P (r))
39308 switch (mode)
39310 case E_QImode:
39311 store_string = "stb";
39312 break;
39313 case E_HImode:
39314 store_string = "sth";
39315 break;
39316 case E_SImode:
39317 case E_SFmode:
39318 store_string = "stw";
39319 break;
39320 case E_DImode:
39321 case E_DFmode:
39322 if (!TARGET_POWERPC64)
39323 gcc_unreachable ();
39324 store_string = "std";
39325 break;
39326 default:
39327 gcc_unreachable ();
39330 else
39331 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
39333 if (!MEM_P (mem))
39334 fatal_insn ("emit_fusion_p9_store not MEM", mem);
39336 addr = XEXP (mem, 0);
39337 fusion_split_address (addr, &hi, &lo);
39339 /* Emit the addis instruction. */
39340 emit_fusion_addis (tmp_reg, hi);
39342 /* Emit the D-form store instruction. */
39343 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
39345 return "";
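/* Illustrative example (not from the original source) of the output for a
   DFmode store from an FPR, with hypothetical registers:

       addis r9,r2,sym@toc@ha
       stfd  f1,sym@toc@l(r9)  */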
39348 #ifdef RS6000_GLIBC_ATOMIC_FENV
39349 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39350 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39351 #endif
39353 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39355 static void
39356 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39358 if (!TARGET_HARD_FLOAT)
39360 #ifdef RS6000_GLIBC_ATOMIC_FENV
39361 if (atomic_hold_decl == NULL_TREE)
39363 atomic_hold_decl
39364 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39365 get_identifier ("__atomic_feholdexcept"),
39366 build_function_type_list (void_type_node,
39367 double_ptr_type_node,
39368 NULL_TREE));
39369 TREE_PUBLIC (atomic_hold_decl) = 1;
39370 DECL_EXTERNAL (atomic_hold_decl) = 1;
39373 if (atomic_clear_decl == NULL_TREE)
39375 atomic_clear_decl
39376 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39377 get_identifier ("__atomic_feclearexcept"),
39378 build_function_type_list (void_type_node,
39379 NULL_TREE));
39380 TREE_PUBLIC (atomic_clear_decl) = 1;
39381 DECL_EXTERNAL (atomic_clear_decl) = 1;
39384 tree const_double = build_qualified_type (double_type_node,
39385 TYPE_QUAL_CONST);
39386 tree const_double_ptr = build_pointer_type (const_double);
39387 if (atomic_update_decl == NULL_TREE)
39389 atomic_update_decl
39390 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39391 get_identifier ("__atomic_feupdateenv"),
39392 build_function_type_list (void_type_node,
39393 const_double_ptr,
39394 NULL_TREE));
39395 TREE_PUBLIC (atomic_update_decl) = 1;
39396 DECL_EXTERNAL (atomic_update_decl) = 1;
39399 tree fenv_var = create_tmp_var_raw (double_type_node);
39400 TREE_ADDRESSABLE (fenv_var) = 1;
39401 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39403 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39404 *clear = build_call_expr (atomic_clear_decl, 0);
39405 *update = build_call_expr (atomic_update_decl, 1,
39406 fold_convert (const_double_ptr, fenv_addr));
39407 #endif
39408 return;
39411 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39412 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39413 tree call_mffs = build_call_expr (mffs, 0);
39415 /* Generates the equivalent of feholdexcept (&fenv_var)
39417 *fenv_var = __builtin_mffs ();
39418 double fenv_hold;
39419 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39420 __builtin_mtfsf (0xff, fenv_hold); */
39422 /* Mask to clear everything except for the rounding modes and non-IEEE
39423 arithmetic flag. */
39424 const unsigned HOST_WIDE_INT hold_exception_mask =
39425 HOST_WIDE_INT_C (0xffffffff00000007);
39427 tree fenv_var = create_tmp_var_raw (double_type_node);
39429 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39431 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39432 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39433 build_int_cst (uint64_type_node,
39434 hold_exception_mask));
39436 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39437 fenv_llu_and);
39439 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39440 build_int_cst (unsigned_type_node, 0xff),
39441 fenv_hold_mtfsf);
39443 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39445 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39447 double fenv_clear = __builtin_mffs ();
39448 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39449 __builtin_mtfsf (0xff, fenv_clear); */
39451 /* Mask to clear everything in the lower word of the FPSCR, including
39452 all of the exception bits. */
39453 const unsigned HOST_WIDE_INT clear_exception_mask =
39454 HOST_WIDE_INT_C (0xffffffff00000000);
39456 tree fenv_clear = create_tmp_var_raw (double_type_node);
39458 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39460 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39461 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39462 fenv_clean_llu,
39463 build_int_cst (uint64_type_node,
39464 clear_exception_mask));
39466 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39467 fenv_clear_llu_and);
39469 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39470 build_int_cst (unsigned_type_node, 0xff),
39471 fenv_clear_mtfsf);
39473 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39475 /* Generates the equivalent of feupdateenv (&fenv_var)
39477 double old_fenv = __builtin_mffs ();
39478 double fenv_update;
39479 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39480 (*(uint64_t*)fenv_var & 0x1ff80fff);
39481 __builtin_mtfsf (0xff, fenv_update); */
39483 const unsigned HOST_WIDE_INT update_exception_mask =
39484 HOST_WIDE_INT_C (0xffffffff1fffff00);
39485 const unsigned HOST_WIDE_INT new_exception_mask =
39486 HOST_WIDE_INT_C (0x1ff80fff);
39488 tree old_fenv = create_tmp_var_raw (double_type_node);
39489 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39491 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39492 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39493 build_int_cst (uint64_type_node,
39494 update_exception_mask));
39496 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39497 build_int_cst (uint64_type_node,
39498 new_exception_mask));
39500 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39501 old_llu_and, new_llu_and);
39503 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39504 new_llu_mask);
39506 tree update_mtfsf = build_call_expr (mtfsf, 2,
39507 build_int_cst (unsigned_type_node, 0xff),
39508 fenv_update_mtfsf);
39510 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39513 void
39514 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39516 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39518 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39519 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39521 /* The destination of the vmrgew instruction layout is:
39522 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39523 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39524 vmrgew instruction will be correct. */
39525 if (VECTOR_ELT_ORDER_BIG)
39527 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39528 GEN_INT (0)));
39529 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39530 GEN_INT (3)));
39532 else
39534 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39535 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39538 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39539 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39541 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39542 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39544 if (VECTOR_ELT_ORDER_BIG)
39545 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39546 else
39547 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
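/* Illustrative trace (a sketch, not from the original source): on a
   big-endian target the sequence above computes

       rtx_tmp0 = { src1[0], src2[0] }
       rtx_tmp1 = { src1[1], src2[1] }

   and, after the conversions and the vmrgew merge,

       dst = { (float) src1[0], (float) src1[1],
               (float) src2[0], (float) src2[1] }.

   The little-endian path uses different xxpermdi selectors and swapped
   vmrgew operands to produce the same logical element order. */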
39550 void
39551 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39553 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39555 rtx_tmp0 = gen_reg_rtx (V2DImode);
39556 rtx_tmp1 = gen_reg_rtx (V2DImode);
39558 /* The destination of the vmrgew instruction layout is:
39559 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39560 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39561 vmrgew instruction will be correct. */
39562 if (VECTOR_ELT_ORDER_BIG)
39564 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39565 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39567 else
39569 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39570 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39573 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39574 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39576 if (signed_convert)
39578 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39579 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39581 else
39583 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39584 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39587 if (VECTOR_ELT_ORDER_BIG)
39588 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39589 else
39590 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39593 void
39594 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39595 rtx src2)
39597 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39599 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39600 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39602 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39603 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39605 rtx_tmp2 = gen_reg_rtx (V4SImode);
39606 rtx_tmp3 = gen_reg_rtx (V4SImode);
39608 if (signed_convert)
39610 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39611 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39613 else
39615 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39616 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39619 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39622 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39624 static bool
39625 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39626 optimization_type opt_type)
39628 switch (op)
39630 case rsqrt_optab:
39631 return (opt_type == OPTIMIZE_FOR_SPEED
39632 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39634 default:
39635 return true;
39639 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39641 static HOST_WIDE_INT
39642 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39644 if (TREE_CODE (exp) == STRING_CST
39645 && (STRICT_ALIGNMENT || !optimize_size))
39646 return MAX (align, BITS_PER_WORD);
39647 return align;
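/* Illustrative example (not from the original source): with the hook
   above, a string constant such as "abc" is aligned to at least
   BITS_PER_WORD (e.g. 64 bits on a 64-bit target), unless we are
   optimizing for size on a target that does not require strict
   alignment. */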
39650 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39652 static HOST_WIDE_INT
39653 rs6000_starting_frame_offset (void)
39655 if (FRAME_GROWS_DOWNWARD)
39656 return 0;
39657 return RS6000_STARTING_FRAME_OFFSET;
39660 struct gcc_target targetm = TARGET_INITIALIZER;
39662 #include "gt-rs6000.h"