/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name.  */
  unsigned int mask;		/* mask bits to set.  */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
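/* For example, "-mrecip=divf,rsqrtd" ORs (RECIP_SF_DIV | RECIP_V4SF_DIV)
   with (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT), enabling the estimate
   sequences only for single-precision divides and double-precision
   reciprocal square roots.  */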
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
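/* A call such as

     if (__builtin_cpu_is ("power9"))
       ...

   is matched against the "cpu" strings above and compiled down to a
   comparison of the AT_PLATFORM value cached in the TCB against
   PPC_PLATFORM_POWER9.  */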
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
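/* Similarly, __builtin_cpu_supports ("vsx") tests PPC_FEATURE_HAS_VSX
   against the cached AT_HWCAP word (id 0 above), while e.g. "arch_3_00"
   tests PPC_FEATURE2_ARCH_3_00 against AT_HWCAP2 (id 1).  */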
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
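/* This table backs function multi-versioning; an illustrative declaration
   such as

     __attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
     long mod (long a, long b);

   gets one clone per listed ISA, and the generated resolver picks the
   newest clone whose HWCAP name above is accepted by
   __builtin_cpu_supports at load time.  */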
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
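/* For instance, a mode that is valid in a register class, supports both
   reg+reg and reg+offset addresses, and allows the update forms would carry
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
    | RELOAD_REG_PRE_INCDEC | RELOAD_REG_PRE_MODIFY) in its addr_mask entry;
   the inline helpers below simply test individual bits of that mask.  */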
/* For each mode, the valid addressing-mode masks and reload insns, indexed
   by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	/* INSN for fusing gpr ADDIS/loads.  */
				/* INSNs for fusing addi with loads
				   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
				/* INSNs for fusing addis with loads
				   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
  bool fused_toc;		/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
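/* Typical use when validating an auto-increment address (an illustrative
   sketch; the real checks live in the legitimate-address code later in
   this file):

     if (GET_CODE (addr) == PRE_INC && !mode_supports_pre_incdec_p (mode))
       return false;  */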
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
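/* An illustrative case being guarded against: a store-with-update insn
   whose pattern is a PARALLEL of two SETs, e.g.

     (parallel [(set (mem:DI (plus:DI (reg 3) (const_int 8))) (reg 4))
		(set (reg 3) (plus:DI (reg 3) (const_int 8)))])

   has no single_set, so the scans above verify that every element of
   such a PARALLEL is a SET, CLOBBER, or USE (anything else would trip
   an assertion in the generic routine) before deferring to
   store_data_bypass_p.  */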
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
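/* For example, the ISA 3.0 lxv/stxv instructions take a DQ-form offset,
   so "lxv vs0,16(r9)" is encodable while an offset of 8 is not; this is
   why the quad-offset case is tracked separately from ordinary D-form
   addressing.  */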
/* Processor costs (relative to an add).  */
const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
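/* COSTS_N_INSNS (N), from rtl.h, expands to N * 4, so these entries are
   quarter-instruction units relative to a single add; e.g. the
   COSTS_N_INSNS (65) divsi entry for rs64a below models a 32-bit divide
   as costing 65 adds.  */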
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
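/* Each RS6000_BUILTIN_n macro turns one line of rs6000-builtin.def into an
   initializer for the table above; schematically (the real .def entries go
   through the BU_* convenience macros), an entry such as

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_BINARY,
		       CODE_FOR_addv16qi3)

   expands to { "__builtin_altivec_vaddubm", CODE_FOR_addv16qi3,
   RS6000_BTM_ALTIVEC, RS6000_BTC_BINARY }.  */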
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *,rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
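/* So ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0) and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31),
   matching the bit order the VRSAVE register itself uses.  */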
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
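/* With these overrides, an unaligned 32-bit value x is emitted as
   ".vbyte 4,x" on XCOFF and ".long x" on Darwin; ELF targets keep the
   defaults, which already handle unaligned data.  */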
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT		\
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1704 #undef TARGET_INIT_BUILTINS
1705 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1706 #undef TARGET_BUILTIN_DECL
1707 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1709 #undef TARGET_FOLD_BUILTIN
1710 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1711 #undef TARGET_GIMPLE_FOLD_BUILTIN
1712 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1714 #undef TARGET_EXPAND_BUILTIN
1715 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1717 #undef TARGET_MANGLE_TYPE
1718 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1720 #undef TARGET_INIT_LIBFUNCS
1721 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1723 #if TARGET_MACHO
1724 #undef TARGET_BINDS_LOCAL_P
1725 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1726 #endif
1728 #undef TARGET_MS_BITFIELD_LAYOUT_P
1729 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1731 #undef TARGET_ASM_OUTPUT_MI_THUNK
1732 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1734 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1735 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1737 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1738 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1740 #undef TARGET_REGISTER_MOVE_COST
1741 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1742 #undef TARGET_MEMORY_MOVE_COST
1743 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1744 #undef TARGET_CANNOT_COPY_INSN_P
1745 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1746 #undef TARGET_RTX_COSTS
1747 #define TARGET_RTX_COSTS rs6000_rtx_costs
1748 #undef TARGET_ADDRESS_COST
1749 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1750 #undef TARGET_INSN_COST
1751 #define TARGET_INSN_COST rs6000_insn_cost
1753 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1754 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1756 #undef TARGET_PROMOTE_FUNCTION_MODE
1757 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1759 #undef TARGET_RETURN_IN_MEMORY
1760 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1762 #undef TARGET_RETURN_IN_MSB
1763 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1765 #undef TARGET_SETUP_INCOMING_VARARGS
1766 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1768 /* Always strict argument naming on rs6000. */
1769 #undef TARGET_STRICT_ARGUMENT_NAMING
1770 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1771 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1772 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1773 #undef TARGET_SPLIT_COMPLEX_ARG
1774 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1775 #undef TARGET_MUST_PASS_IN_STACK
1776 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1777 #undef TARGET_PASS_BY_REFERENCE
1778 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1779 #undef TARGET_ARG_PARTIAL_BYTES
1780 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1781 #undef TARGET_FUNCTION_ARG_ADVANCE
1782 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1783 #undef TARGET_FUNCTION_ARG
1784 #define TARGET_FUNCTION_ARG rs6000_function_arg
1785 #undef TARGET_FUNCTION_ARG_PADDING
1786 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1787 #undef TARGET_FUNCTION_ARG_BOUNDARY
1788 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1790 #undef TARGET_BUILD_BUILTIN_VA_LIST
1791 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1793 #undef TARGET_EXPAND_BUILTIN_VA_START
1794 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1796 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1797 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1799 #undef TARGET_EH_RETURN_FILTER_MODE
1800 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1802 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1803 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1805 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1806 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1808 #undef TARGET_FLOATN_MODE
1809 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1811 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1812 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1814 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1815 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1817 #undef TARGET_MD_ASM_ADJUST
1818 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1820 #undef TARGET_OPTION_OVERRIDE
1821 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1823 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1825 rs6000_builtin_vectorized_function
1827 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1828 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1829 rs6000_builtin_md_vectorized_function
1831 #undef TARGET_STACK_PROTECT_GUARD
1832 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1834 #if !TARGET_MACHO
1835 #undef TARGET_STACK_PROTECT_FAIL
1836 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1837 #endif
1839 #ifdef HAVE_AS_TLS
1840 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1841 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1842 #endif
1844 /* Use a 32-bit anchor range. This leads to sequences like:
1846 addis tmp,anchor,high
1847 add dest,tmp,low
1849 where tmp itself acts as an anchor, and can be shared between
1850 accesses to the same 64k page. */
1851 #undef TARGET_MIN_ANCHOR_OFFSET
1852 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1853 #undef TARGET_MAX_ANCHOR_OFFSET
1854 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
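/* Illustrative example: with a 16-bit low part, loads from anchor+0x12345678
   and anchor+0x1234567c can share a single "addis tmp,anchor,0x1234" and
   differ only in the low offsets 0x5678 and 0x567c. */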
1855 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1856 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1857 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1858 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1860 #undef TARGET_BUILTIN_RECIPROCAL
1861 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1863 #undef TARGET_SECONDARY_RELOAD
1864 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1865 #undef TARGET_SECONDARY_MEMORY_NEEDED
1866 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1867 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1868 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1870 #undef TARGET_LEGITIMATE_ADDRESS_P
1871 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1873 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1874 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1876 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1877 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1879 #undef TARGET_CAN_ELIMINATE
1880 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1882 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1883 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1885 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1886 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1888 #undef TARGET_TRAMPOLINE_INIT
1889 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1891 #undef TARGET_FUNCTION_VALUE
1892 #define TARGET_FUNCTION_VALUE rs6000_function_value
1894 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1895 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1897 #undef TARGET_OPTION_SAVE
1898 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1900 #undef TARGET_OPTION_RESTORE
1901 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1903 #undef TARGET_OPTION_PRINT
1904 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1906 #undef TARGET_CAN_INLINE_P
1907 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1909 #undef TARGET_SET_CURRENT_FUNCTION
1910 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1912 #undef TARGET_LEGITIMATE_CONSTANT_P
1913 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1915 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1916 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1918 #undef TARGET_CAN_USE_DOLOOP_P
1919 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1921 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1922 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1924 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1925 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1926 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1927 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1928 #undef TARGET_UNWIND_WORD_MODE
1929 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1931 #undef TARGET_OFFLOAD_OPTIONS
1932 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1934 #undef TARGET_C_MODE_FOR_SUFFIX
1935 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1937 #undef TARGET_INVALID_BINARY_OP
1938 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1940 #undef TARGET_OPTAB_SUPPORTED_P
1941 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1943 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1944 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1946 #undef TARGET_COMPARE_VERSION_PRIORITY
1947 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1949 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1950 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1951 rs6000_generate_version_dispatcher_body
1953 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1954 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1955 rs6000_get_function_versions_dispatcher
1957 #undef TARGET_OPTION_FUNCTION_VERSIONS
1958 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1960 #undef TARGET_HARD_REGNO_NREGS
1961 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1962 #undef TARGET_HARD_REGNO_MODE_OK
1963 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1965 #undef TARGET_MODES_TIEABLE_P
1966 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1968 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1969 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1970 rs6000_hard_regno_call_part_clobbered
1972 #undef TARGET_SLOW_UNALIGNED_ACCESS
1973 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1975 #undef TARGET_CAN_CHANGE_MODE_CLASS
1976 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1978 #undef TARGET_CONSTANT_ALIGNMENT
1979 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1981 #undef TARGET_STARTING_FRAME_OFFSET
1982 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1985 /* Processor table. */
1986 struct rs6000_ptt
1988 const char *const name; /* Canonical processor name. */
1989 const enum processor_type processor; /* Processor type enum value. */
1990 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1993 static struct rs6000_ptt const processor_target_table[] =
1995 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1996 #include "rs6000-cpus.def"
1997 #undef RS6000_CPU
2000 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2001 name is invalid. */
2003 static int
2004 rs6000_cpu_name_lookup (const char *name)
2006 size_t i;
2008 if (name != NULL)
2010 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2011 if (! strcmp (name, processor_target_table[i].name))
2012 return (int)i;
2015 return -1;
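/* For example, rs6000_cpu_name_lookup ("power9") returns the index of the
   "power9" entry defined in rs6000-cpus.def above, while a name with no
   entry there returns -1. */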
2019 /* Return number of consecutive hard regs needed starting at reg REGNO
2020 to hold something of mode MODE.
2021 This is ordinarily the length in words of a value of mode MODE
2022 but can be less for certain modes in special long registers.
2024 POWER and PowerPC GPRs hold 32 bits worth;
2025 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2027 static int
2028 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2030 unsigned HOST_WIDE_INT reg_size;
2032 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2033 128-bit floating point that can go in vector registers, which has VSX
2034 memory addressing. */
2035 if (FP_REGNO_P (regno))
2036 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2037 ? UNITS_PER_VSX_WORD
2038 : UNITS_PER_FP_WORD);
2040 else if (ALTIVEC_REGNO_P (regno))
2041 reg_size = UNITS_PER_ALTIVEC_WORD;
2043 else
2044 reg_size = UNITS_PER_WORD;
2046 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
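/* For example, TImode (16 bytes) needs four 32-bit GPRs or two 64-bit GPRs,
   but only one 16-byte Altivec register: (16 + reg_size - 1) / reg_size. */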
2049 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2050 MODE. */
2051 static int
2052 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2054 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2056 if (COMPLEX_MODE_P (mode))
2057 mode = GET_MODE_INNER (mode);
2059 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2060 register pairs, and we use PTImode where we need to deal with quad word
2061 memory operations. Don't allow quad words in the argument or frame
2062 pointer registers, just registers 0..31. */
2063 if (mode == PTImode)
2064 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2065 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2066 && ((regno & 1) == 0));
2068 /* VSX registers that overlap the FPR registers are wider than the FPRs on
2069 non-VSX implementations. Don't allow an item to be split between a FP
2070 register and an Altivec register. Allow TImode in all VSX registers if
2071 the user asked for it. */
2072 if (TARGET_VSX && VSX_REGNO_P (regno)
2073 && (VECTOR_MEM_VSX_P (mode)
2074 || FLOAT128_VECTOR_P (mode)
2075 || reg_addr[mode].scalar_in_vmx_p
2076 || mode == TImode
2077 || (TARGET_VADDUQM && mode == V1TImode)))
2079 if (FP_REGNO_P (regno))
2080 return FP_REGNO_P (last_regno);
2082 if (ALTIVEC_REGNO_P (regno))
2084 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2085 return 0;
2087 return ALTIVEC_REGNO_P (last_regno);
2091 /* The GPRs can hold any mode, but values bigger than one register
2092 cannot go past R31. */
2093 if (INT_REGNO_P (regno))
2094 return INT_REGNO_P (last_regno);
2096 /* The float registers (except for VSX vector modes) can only hold floating
2097 modes and DImode. */
2098 if (FP_REGNO_P (regno))
2100 if (FLOAT128_VECTOR_P (mode))
2101 return false;
2103 if (SCALAR_FLOAT_MODE_P (mode)
2104 && (mode != TDmode || (regno % 2) == 0)
2105 && FP_REGNO_P (last_regno))
2106 return 1;
2108 if (GET_MODE_CLASS (mode) == MODE_INT)
2110 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2111 return 1;
2113 if (TARGET_P8_VECTOR && (mode == SImode))
2114 return 1;
2116 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2117 return 1;
2120 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2121 && PAIRED_VECTOR_MODE (mode))
2122 return 1;
2124 return 0;
2127 /* The CR register can only hold CC modes. */
2128 if (CR_REGNO_P (regno))
2129 return GET_MODE_CLASS (mode) == MODE_CC;
2131 if (CA_REGNO_P (regno))
2132 return mode == Pmode || mode == SImode;
2134 /* AltiVec modes only in AltiVec registers. */
2135 if (ALTIVEC_REGNO_P (regno))
2136 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2137 || mode == V1TImode);
2139 /* We cannot put non-VSX TImode or PTImode anywhere except in general
2140 registers, and it must fit within the register set. */
2142 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
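/* For example, the checks above accept TDmode in FPRs only at an even
   register number, restrict the CR registers to CC modes, and allow the CA
   (carry) register to hold only Pmode or SImode. */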
2145 /* Implement TARGET_HARD_REGNO_NREGS. */
2147 static unsigned int
2148 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2150 return rs6000_hard_regno_nregs[mode][regno];
2153 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2155 static bool
2156 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2158 return rs6000_hard_regno_mode_ok_p[mode][regno];
2161 /* Implement TARGET_MODES_TIEABLE_P.
2163 PTImode cannot tie with other modes because PTImode is restricted to even
2164 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2165 57744).
2167 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2168 128-bit floating point on VSX systems ties with other vectors. */
2170 static bool
2171 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2173 if (mode1 == PTImode)
2174 return mode2 == PTImode;
2175 if (mode2 == PTImode)
2176 return false;
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2179 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2180 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2181 return false;
2183 if (SCALAR_FLOAT_MODE_P (mode1))
2184 return SCALAR_FLOAT_MODE_P (mode2);
2185 if (SCALAR_FLOAT_MODE_P (mode2))
2186 return false;
2188 if (GET_MODE_CLASS (mode1) == MODE_CC)
2189 return GET_MODE_CLASS (mode2) == MODE_CC;
2190 if (GET_MODE_CLASS (mode2) == MODE_CC)
2191 return false;
2193 if (PAIRED_VECTOR_MODE (mode1))
2194 return PAIRED_VECTOR_MODE (mode2);
2195 if (PAIRED_VECTOR_MODE (mode2))
2196 return false;
2198 return true;
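/* For example, SFmode and DFmode are tieable (both scalar float), V4SImode
   and V2DFmode are tieable (both Altivec/VSX vector modes), but TImode
   never ties with PTImode. */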
2201 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2203 static bool
2204 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2206 if (TARGET_32BIT
2207 && TARGET_POWERPC64
2208 && GET_MODE_SIZE (mode) > 4
2209 && INT_REGNO_P (regno))
2210 return true;
2212 if (TARGET_VSX
2213 && FP_REGNO_P (regno)
2214 && GET_MODE_SIZE (mode) > 8
2215 && !FLOAT128_2REG_P (mode))
2216 return true;
2218 return false;
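/* For example, with -m32 -mpowerpc64 a DImode value occupies the full
   64-bit GPR, but the 32-bit ABI only preserves the low 32 bits across
   calls, so the register is partially clobbered. */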
2221 /* Print interesting facts about registers. */
2222 static void
2223 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2225 int r, m;
2227 for (r = first_regno; r <= last_regno; ++r)
2229 const char *comma = "";
2230 int len;
2232 if (first_regno == last_regno)
2233 fprintf (stderr, "%s:\t", reg_name);
2234 else
2235 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2237 len = 8;
2238 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2239 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2241 if (len > 70)
2243 fprintf (stderr, ",\n\t");
2244 len = 8;
2245 comma = "";
2248 if (rs6000_hard_regno_nregs[m][r] > 1)
2249 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2250 rs6000_hard_regno_nregs[m][r]);
2251 else
2252 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2254 comma = ", ";
2257 if (call_used_regs[r])
2259 if (len > 70)
2261 fprintf (stderr, ",\n\t");
2262 len = 8;
2263 comma = "";
2266 len += fprintf (stderr, "%s%s", comma, "call-used");
2267 comma = ", ";
2270 if (fixed_regs[r])
2272 if (len > 70)
2274 fprintf (stderr, ",\n\t");
2275 len = 8;
2276 comma = "";
2279 len += fprintf (stderr, "%s%s", comma, "fixed");
2280 comma = ", ";
2283 if (len > 70)
2285 fprintf (stderr, ",\n\t");
2286 comma = "";
2289 len += fprintf (stderr, "%sreg-class = %s", comma,
2290 reg_class_names[(int)rs6000_regno_regclass[r]]);
2291 comma = ", ";
2293 if (len > 70)
2295 fprintf (stderr, ",\n\t");
2296 comma = "";
2299 fprintf (stderr, "%sregno = %d\n", comma, r);
2303 static const char *
2304 rs6000_debug_vector_unit (enum rs6000_vector v)
2306 const char *ret;
2308 switch (v)
2310 case VECTOR_NONE: ret = "none"; break;
2311 case VECTOR_ALTIVEC: ret = "altivec"; break;
2312 case VECTOR_VSX: ret = "vsx"; break;
2313 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2314 case VECTOR_PAIRED: ret = "paired"; break;
2315 case VECTOR_OTHER: ret = "other"; break;
2316 default: ret = "unknown"; break;
2319 return ret;
2322 /* Inner function printing just the address mask for a particular reload
2323 register class. */
2324 DEBUG_FUNCTION char *
2325 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2327 static char ret[8];
2328 char *p = ret;
2330 if ((mask & RELOAD_REG_VALID) != 0)
2331 *p++ = 'v';
2332 else if (keep_spaces)
2333 *p++ = ' ';
2335 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2336 *p++ = 'm';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_INDEXED) != 0)
2341 *p++ = 'i';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2345 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2346 *p++ = 'O';
2347 else if ((mask & RELOAD_REG_OFFSET) != 0)
2348 *p++ = 'o';
2349 else if (keep_spaces)
2350 *p++ = ' ';
2352 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2353 *p++ = '+';
2354 else if (keep_spaces)
2355 *p++ = ' ';
2357 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2358 *p++ = '+';
2359 else if (keep_spaces)
2360 *p++ = ' ';
2362 if ((mask & RELOAD_REG_AND_M16) != 0)
2363 *p++ = '&';
2364 else if (keep_spaces)
2365 *p++ = ' ';
2367 *p = '\0';
2369 return ret;
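/* Sample output: a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED,
   RELOAD_REG_OFFSET, and both update forms prints as "v io++ " when
   KEEP_SPACES is true. */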
2372 /* Print the address masks in a human readable fashion. */
2373 DEBUG_FUNCTION void
2374 rs6000_debug_print_mode (ssize_t m)
2376 ssize_t rc;
2377 int spaces = 0;
2378 bool fuse_extra_p;
2380 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2381 for (rc = 0; rc < N_RELOAD_REG; rc++)
2382 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2383 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2385 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2386 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2387 fprintf (stderr, " Reload=%c%c",
2388 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2389 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2390 else
2391 spaces += sizeof (" Reload=sl") - 1;
2393 if (reg_addr[m].scalar_in_vmx_p)
2395 fprintf (stderr, "%*s Upper=y", spaces, "");
2396 spaces = 0;
2398 else
2399 spaces += sizeof (" Upper=y") - 1;
2401 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2402 || reg_addr[m].fused_toc);
2403 if (!fuse_extra_p)
2405 for (rc = 0; rc < N_RELOAD_REG; rc++)
2407 if (rc != RELOAD_REG_ANY)
2409 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2411 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2412 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2413 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2415 fuse_extra_p = true;
2416 break;
2422 if (fuse_extra_p)
2424 fprintf (stderr, "%*s Fuse:", spaces, "");
2425 spaces = 0;
2427 for (rc = 0; rc < N_RELOAD_REG; rc++)
2429 if (rc != RELOAD_REG_ANY)
2431 char load, store;
2433 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2434 load = 'l';
2435 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2436 load = 'L';
2437 else
2438 load = '-';
2440 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2441 store = 's';
2442 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2443 store = 'S';
2444 else
2445 store = '-';
2447 if (load == '-' && store == '-')
2448 spaces += 5;
2449 else
2451 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2452 reload_reg_map[rc].name[0], load, store);
2453 spaces = 0;
2458 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2460 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2461 spaces = 0;
2463 else
2464 spaces += sizeof (" P8gpr") - 1;
2466 if (reg_addr[m].fused_toc)
2468 fprintf (stderr, "%*sToc", (spaces + 1), "");
2469 spaces = 0;
2471 else
2472 spaces += sizeof (" Toc") - 1;
2474 else
2475 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2477 if (rs6000_vector_unit[m] != VECTOR_NONE
2478 || rs6000_vector_mem[m] != VECTOR_NONE)
2480 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2481 spaces, "",
2482 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2483 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2486 fputs ("\n", stderr);
2489 #define DEBUG_FMT_ID "%-32s= "
2490 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2491 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2492 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2494 /* Print various interesting information with -mdebug=reg. */
2495 static void
2496 rs6000_debug_reg_global (void)
2498 static const char *const tf[2] = { "false", "true" };
2499 const char *nl = (const char *)0;
2500 int m;
2501 size_t m1, m2, v;
2502 char costly_num[20];
2503 char nop_num[20];
2504 char flags_buffer[40];
2505 const char *costly_str;
2506 const char *nop_str;
2507 const char *trace_str;
2508 const char *abi_str;
2509 const char *cmodel_str;
2510 struct cl_target_option cl_opts;
2512 /* Modes we want tieable information on. */
2513 static const machine_mode print_tieable_modes[] = {
2514 QImode,
2515 HImode,
2516 SImode,
2517 DImode,
2518 TImode,
2519 PTImode,
2520 SFmode,
2521 DFmode,
2522 TFmode,
2523 IFmode,
2524 KFmode,
2525 SDmode,
2526 DDmode,
2527 TDmode,
2528 V2SImode,
2529 V16QImode,
2530 V8HImode,
2531 V4SImode,
2532 V2DImode,
2533 V1TImode,
2534 V32QImode,
2535 V16HImode,
2536 V8SImode,
2537 V4DImode,
2538 V2TImode,
2539 V2SFmode,
2540 V4SFmode,
2541 V2DFmode,
2542 V8SFmode,
2543 V4DFmode,
2544 CCmode,
2545 CCUNSmode,
2546 CCEQmode,
2549 /* Virtual regs we are interested in. */
2550 static const struct {
2551 int regno; /* register number. */
2552 const char *name; /* register name. */
2553 } virtual_regs[] = {
2554 { STACK_POINTER_REGNUM, "stack pointer:" },
2555 { TOC_REGNUM, "toc: " },
2556 { STATIC_CHAIN_REGNUM, "static chain: " },
2557 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2558 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2559 { ARG_POINTER_REGNUM, "arg pointer: " },
2560 { FRAME_POINTER_REGNUM, "frame pointer:" },
2561 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2562 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2563 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2564 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2565 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2566 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2567 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2568 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2569 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2572 fputs ("\nHard register information:\n", stderr);
2573 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2574 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2575 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2576 LAST_ALTIVEC_REGNO,
2577 "vs");
2578 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2579 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2580 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2581 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2582 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2583 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2585 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2586 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2587 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2589 fprintf (stderr,
2590 "\n"
2591 "d reg_class = %s\n"
2592 "f reg_class = %s\n"
2593 "v reg_class = %s\n"
2594 "wa reg_class = %s\n"
2595 "wb reg_class = %s\n"
2596 "wd reg_class = %s\n"
2597 "we reg_class = %s\n"
2598 "wf reg_class = %s\n"
2599 "wg reg_class = %s\n"
2600 "wh reg_class = %s\n"
2601 "wi reg_class = %s\n"
2602 "wj reg_class = %s\n"
2603 "wk reg_class = %s\n"
2604 "wl reg_class = %s\n"
2605 "wm reg_class = %s\n"
2606 "wo reg_class = %s\n"
2607 "wp reg_class = %s\n"
2608 "wq reg_class = %s\n"
2609 "wr reg_class = %s\n"
2610 "ws reg_class = %s\n"
2611 "wt reg_class = %s\n"
2612 "wu reg_class = %s\n"
2613 "wv reg_class = %s\n"
2614 "ww reg_class = %s\n"
2615 "wx reg_class = %s\n"
2616 "wy reg_class = %s\n"
2617 "wz reg_class = %s\n"
2618 "wA reg_class = %s\n"
2619 "wH reg_class = %s\n"
2620 "wI reg_class = %s\n"
2621 "wJ reg_class = %s\n"
2622 "wK reg_class = %s\n"
2623 "\n",
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2651 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2652 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2653 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2654 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2655 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2657 nl = "\n";
2658 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2659 rs6000_debug_print_mode (m);
2661 fputs ("\n", stderr);
2663 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2665 machine_mode mode1 = print_tieable_modes[m1];
2666 bool first_time = true;
2668 nl = (const char *)0;
2669 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2671 machine_mode mode2 = print_tieable_modes[m2];
2672 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2674 if (first_time)
2676 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2677 nl = "\n";
2678 first_time = false;
2681 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2685 if (!first_time)
2686 fputs ("\n", stderr);
2689 if (nl)
2690 fputs (nl, stderr);
2692 if (rs6000_recip_control)
2694 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2696 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2697 if (rs6000_recip_bits[m])
2699 fprintf (stderr,
2700 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2701 GET_MODE_NAME (m),
2702 (RS6000_RECIP_AUTO_RE_P (m)
2703 ? "auto"
2704 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2705 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2706 ? "auto"
2707 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2710 fputs ("\n", stderr);
2713 if (rs6000_cpu_index >= 0)
2715 const char *name = processor_target_table[rs6000_cpu_index].name;
2716 HOST_WIDE_INT flags
2717 = processor_target_table[rs6000_cpu_index].target_enable;
2719 sprintf (flags_buffer, "-mcpu=%s flags", name);
2720 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2722 else
2723 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2725 if (rs6000_tune_index >= 0)
2727 const char *name = processor_target_table[rs6000_tune_index].name;
2728 HOST_WIDE_INT flags
2729 = processor_target_table[rs6000_tune_index].target_enable;
2731 sprintf (flags_buffer, "-mtune=%s flags", name);
2732 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2734 else
2735 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2737 cl_target_option_save (&cl_opts, &global_options);
2738 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2739 rs6000_isa_flags);
2741 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2742 rs6000_isa_flags_explicit);
2744 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2745 rs6000_builtin_mask);
2747 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2749 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2750 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2752 switch (rs6000_sched_costly_dep)
2754 case max_dep_latency:
2755 costly_str = "max_dep_latency";
2756 break;
2758 case no_dep_costly:
2759 costly_str = "no_dep_costly";
2760 break;
2762 case all_deps_costly:
2763 costly_str = "all_deps_costly";
2764 break;
2766 case true_store_to_load_dep_costly:
2767 costly_str = "true_store_to_load_dep_costly";
2768 break;
2770 case store_to_load_dep_costly:
2771 costly_str = "store_to_load_dep_costly";
2772 break;
2774 default:
2775 costly_str = costly_num;
2776 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2777 break;
2780 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2782 switch (rs6000_sched_insert_nops)
2784 case sched_finish_regroup_exact:
2785 nop_str = "sched_finish_regroup_exact";
2786 break;
2788 case sched_finish_pad_groups:
2789 nop_str = "sched_finish_pad_groups";
2790 break;
2792 case sched_finish_none:
2793 nop_str = "sched_finish_none";
2794 break;
2796 default:
2797 nop_str = nop_num;
2798 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2799 break;
2802 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2804 switch (rs6000_sdata)
2806 default:
2807 case SDATA_NONE:
2808 break;
2810 case SDATA_DATA:
2811 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2812 break;
2814 case SDATA_SYSV:
2815 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2816 break;
2818 case SDATA_EABI:
2819 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2820 break;
2824 switch (rs6000_traceback)
2826 case traceback_default: trace_str = "default"; break;
2827 case traceback_none: trace_str = "none"; break;
2828 case traceback_part: trace_str = "part"; break;
2829 case traceback_full: trace_str = "full"; break;
2830 default: trace_str = "unknown"; break;
2833 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2835 switch (rs6000_current_cmodel)
2837 case CMODEL_SMALL: cmodel_str = "small"; break;
2838 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2839 case CMODEL_LARGE: cmodel_str = "large"; break;
2840 default: cmodel_str = "unknown"; break;
2843 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2845 switch (rs6000_current_abi)
2847 case ABI_NONE: abi_str = "none"; break;
2848 case ABI_AIX: abi_str = "aix"; break;
2849 case ABI_ELFv2: abi_str = "ELFv2"; break;
2850 case ABI_V4: abi_str = "V4"; break;
2851 case ABI_DARWIN: abi_str = "darwin"; break;
2852 default: abi_str = "unknown"; break;
2855 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2857 if (rs6000_altivec_abi)
2858 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2860 if (rs6000_darwin64_abi)
2861 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2863 fprintf (stderr, DEBUG_FMT_S, "single_float",
2864 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2866 fprintf (stderr, DEBUG_FMT_S, "double_float",
2867 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2869 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2870 (TARGET_SOFT_FLOAT ? "true" : "false"));
2872 if (TARGET_LINK_STACK)
2873 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2875 if (TARGET_P8_FUSION)
2877 char options[80];
2879 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2880 if (TARGET_TOC_FUSION)
2881 strcat (options, ", toc");
2883 if (TARGET_P8_FUSION_SIGN)
2884 strcat (options, ", sign");
2886 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2889 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2890 TARGET_SECURE_PLT ? "secure" : "bss");
2891 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2892 aix_struct_return ? "aix" : "sysv");
2893 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2894 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2895 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2896 tf[!!rs6000_align_branch_targets]);
2897 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2898 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2899 rs6000_long_double_type_size);
2900 if (rs6000_long_double_type_size == 128)
2902 fprintf (stderr, DEBUG_FMT_S, "long double type",
2903 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2904 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2905 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2907 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2908 (int)rs6000_sched_restricted_insns_priority);
2909 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2910 (int)END_BUILTINS);
2911 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2912 (int)RS6000_BUILTIN_COUNT);
2914 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2915 (int)TARGET_FLOAT128_ENABLE_TYPE);
2917 if (TARGET_VSX)
2918 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2919 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2921 if (TARGET_DIRECT_MOVE_128)
2922 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2923 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2927 /* Update the addr mask bits in reg_addr to help secondary reload and the
2928 legitimate address support figure out the appropriate addressing to
2929 use. */
2931 static void
2932 rs6000_setup_reg_addr_masks (void)
2934 ssize_t rc, reg, m, nregs;
2935 addr_mask_type any_addr_mask, addr_mask;
2937 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2939 machine_mode m2 = (machine_mode) m;
2940 bool complex_p = false;
2941 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2942 size_t msize;
2944 if (COMPLEX_MODE_P (m2))
2946 complex_p = true;
2947 m2 = GET_MODE_INNER (m2);
2950 msize = GET_MODE_SIZE (m2);
2952 /* SDmode is special in that we want to access it only via REG+REG
2953 addressing on power7 and above, since we want to use the LFIWZX and
2954 STFIWX instructions to load and store it. */
2955 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2957 any_addr_mask = 0;
2958 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2960 addr_mask = 0;
2961 reg = reload_reg_map[rc].reg;
2963 /* Can mode values go in the GPR/FPR/Altivec registers? */
2964 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2966 bool small_int_vsx_p = (small_int_p
2967 && (rc == RELOAD_REG_FPR
2968 || rc == RELOAD_REG_VMX));
2970 nregs = rs6000_hard_regno_nregs[m][reg];
2971 addr_mask |= RELOAD_REG_VALID;
2973 /* Indicate if the mode takes more than 1 physical register. If
2974 it takes a single register, indicate it can do REG+REG
2975 addressing. Small integers in VSX registers can only do
2976 REG+REG addressing. */
2977 if (small_int_vsx_p)
2978 addr_mask |= RELOAD_REG_INDEXED;
2979 else if (nregs > 1 || m == BLKmode || complex_p)
2980 addr_mask |= RELOAD_REG_MULTIPLE;
2981 else
2982 addr_mask |= RELOAD_REG_INDEXED;
2984 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2985 addressing. If we allow scalars into Altivec registers,
2986 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2988 For VSX systems, we don't allow update addressing for
2989 DFmode/SFmode if those registers can go in both the
2990 traditional floating point registers and Altivec registers.
2991 The load/store instructions for the Altivec registers do not
2992 have update forms. If we allowed update addressing, it seems
2993 to break IV-OPT code using floating point if the index type is
2994 int instead of long (PR target/81550 and target/84042). */
2996 if (TARGET_UPDATE
2997 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2998 && msize <= 8
2999 && !VECTOR_MODE_P (m2)
3000 && !FLOAT128_VECTOR_P (m2)
3001 && !complex_p
3002 && (m != E_DFmode || !TARGET_VSX)
3003 && (m != E_SFmode || !TARGET_P8_VECTOR)
3004 && !small_int_vsx_p)
3006 addr_mask |= RELOAD_REG_PRE_INCDEC;
3008 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
3009 we don't allow PRE_MODIFY for some multi-register
3010 operations. */
3011 switch (m)
3013 default:
3014 addr_mask |= RELOAD_REG_PRE_MODIFY;
3015 break;
3017 case E_DImode:
3018 if (TARGET_POWERPC64)
3019 addr_mask |= RELOAD_REG_PRE_MODIFY;
3020 break;
3022 case E_DFmode:
3023 case E_DDmode:
3024 if (TARGET_DF_INSN)
3025 addr_mask |= RELOAD_REG_PRE_MODIFY;
3026 break;
3031 /* GPR and FPR registers can do REG+OFFSET addressing, except
3032 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3033 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3034 if ((addr_mask != 0) && !indexed_only_p
3035 && msize <= 8
3036 && (rc == RELOAD_REG_GPR
3037 || ((msize == 8 || m2 == SFmode)
3038 && (rc == RELOAD_REG_FPR
3039 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3040 addr_mask |= RELOAD_REG_OFFSET;
3042 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3043 instructions are enabled. The offset for 128-bit VSX registers is
3044 only 12 bits. While GPRs can handle the full offset range, VSX
3045 registers can only handle the restricted range. */
3046 else if ((addr_mask != 0) && !indexed_only_p
3047 && msize == 16 && TARGET_P9_VECTOR
3048 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3049 || (m2 == TImode && TARGET_VSX)))
3051 addr_mask |= RELOAD_REG_OFFSET;
3052 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3053 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3056 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3057 addressing on 128-bit types. */
3058 if (rc == RELOAD_REG_VMX && msize == 16
3059 && (addr_mask & RELOAD_REG_VALID) != 0)
3060 addr_mask |= RELOAD_REG_AND_M16;
3062 reg_addr[m].addr_mask[rc] = addr_mask;
3063 any_addr_mask |= addr_mask;
3066 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
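/* For example, on a VSX target the resulting mask for DFmode in FPRs is
   valid, indexed, and offsettable, but the update (PRE_INC/PRE_DEC and
   PRE_MODIFY) forms are left off because of the IV-OPT problems noted
   above. */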
3071 /* Initialize the various global tables that are based on register size. */
3072 static void
3073 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3075 ssize_t r, m, c;
3076 int align64;
3077 int align32;
3079 /* Precalculate REGNO_REG_CLASS. */
3080 rs6000_regno_regclass[0] = GENERAL_REGS;
3081 for (r = 1; r < 32; ++r)
3082 rs6000_regno_regclass[r] = BASE_REGS;
3084 for (r = 32; r < 64; ++r)
3085 rs6000_regno_regclass[r] = FLOAT_REGS;
3087 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3088 rs6000_regno_regclass[r] = NO_REGS;
3090 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3091 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3093 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3094 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3095 rs6000_regno_regclass[r] = CR_REGS;
3097 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3098 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3099 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3100 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3101 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3102 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3103 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3104 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3105 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3106 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3108 /* Precalculate register class to simpler reload register class. We don't
3109 need all of the register classes that are combinations of different
3110 classes, just the simple ones that have constraint letters. */
3111 for (c = 0; c < N_REG_CLASSES; c++)
3112 reg_class_to_reg_type[c] = NO_REG_TYPE;
3114 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3115 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3116 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3117 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3118 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3119 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3120 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3121 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3122 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3123 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3125 if (TARGET_VSX)
3127 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3128 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3130 else
3132 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3133 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3136 /* Precalculate the valid memory formats as well as the vector information;
3137 this must be set up before the rs6000_hard_regno_nregs_internal calls
3138 below. */
3139 gcc_assert ((int)VECTOR_NONE == 0);
3140 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3141 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3143 gcc_assert ((int)CODE_FOR_nothing == 0);
3144 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3146 gcc_assert ((int)NO_REGS == 0);
3147 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3149 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
3150 controls whether the compiler assumes native alignment or still uses 128-bit alignment. */
3151 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3153 align64 = 64;
3154 align32 = 32;
3156 else
3158 align64 = 128;
3159 align32 = 128;
3162 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3163 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3164 if (TARGET_FLOAT128_TYPE)
3166 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3167 rs6000_vector_align[KFmode] = 128;
3169 if (FLOAT128_IEEE_P (TFmode))
3171 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3172 rs6000_vector_align[TFmode] = 128;
3176 /* V2DF mode, VSX only. */
3177 if (TARGET_VSX)
3179 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3180 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3181 rs6000_vector_align[V2DFmode] = align64;
3184 /* V4SF mode, either VSX or Altivec. */
3185 if (TARGET_VSX)
3187 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3188 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3189 rs6000_vector_align[V4SFmode] = align32;
3191 else if (TARGET_ALTIVEC)
3193 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3194 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3195 rs6000_vector_align[V4SFmode] = align32;
3198 /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
3199 and stores. */
3200 if (TARGET_ALTIVEC)
3202 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3203 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3204 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3205 rs6000_vector_align[V4SImode] = align32;
3206 rs6000_vector_align[V8HImode] = align32;
3207 rs6000_vector_align[V16QImode] = align32;
3209 if (TARGET_VSX)
3211 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3212 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3213 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3215 else
3217 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3218 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3219 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3223 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3224 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3225 if (TARGET_VSX)
3227 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3228 rs6000_vector_unit[V2DImode]
3229 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3230 rs6000_vector_align[V2DImode] = align64;
3232 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3233 rs6000_vector_unit[V1TImode]
3234 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3235 rs6000_vector_align[V1TImode] = 128;
3238 /* DFmode, see if we want to use the VSX unit. Memory is handled
3239 differently, so don't set rs6000_vector_mem. */
3240 if (TARGET_VSX)
3242 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3243 rs6000_vector_align[DFmode] = 64;
3246 /* SFmode, see if we want to use the VSX unit. */
3247 if (TARGET_P8_VECTOR)
3249 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3250 rs6000_vector_align[SFmode] = 32;
3253 /* Allow TImode in VSX registers and set the VSX memory macros. */
3254 if (TARGET_VSX)
3256 rs6000_vector_mem[TImode] = VECTOR_VSX;
3257 rs6000_vector_align[TImode] = align64;
3260 /* TODO add paired floating point vector support. */
3262 /* Register class constraints for the constraints that depend on compile
3263 switches. When the VSX code was added, different constraints were added
3264 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3265 of the VSX registers are used. The register classes for scalar floating
3266 point types are set based on whether we allow that type into the upper
3267 (Altivec) registers. GCC has register classes to target the Altivec
3268 registers for load/store operations, to select using a VSX memory
3269 operation instead of the traditional floating point operation. The
3270 constraints are:
3272 d - Register class to use with traditional DFmode instructions.
3273 f - Register class to use with traditional SFmode instructions.
3274 v - Altivec register.
3275 wa - Any VSX register.
3276 wc - Reserved to represent individual CR bits (used in LLVM).
3277 wd - Preferred register class for V2DFmode.
3278 wf - Preferred register class for V4SFmode.
3279 wg - Float register for power6x move insns.
3280 wh - FP register for direct move instructions.
3281 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3282 wj - FP or VSX register to hold 64-bit integers for direct moves.
3283 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3284 wl - Float register if we can do 32-bit signed int loads.
3285 wm - VSX register for ISA 2.07 direct move operations.
3286 wn - always NO_REGS.
3287 wr - GPR if 64-bit mode is permitted.
3288 ws - Register class to do ISA 2.06 DF operations.
3289 wt - VSX register for TImode in VSX registers.
3290 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3291 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3292 ww - Register class to do SF conversions in with VSX operations.
3293 wx - Float register if we can do 32-bit int stores.
3294 wy - Register class to do ISA 2.07 SF operations.
3295 wz - Float register if we can do 32-bit unsigned int loads.
3296 wH - Altivec register if SImode is allowed in VSX registers.
3297 wI - VSX register if SImode is allowed in VSX registers.
3298 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3299 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
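/* For example, with ISA 2.07 (power8) vector support, "ww" below resolves
   to VSX_REGS so SFmode can live in the upper (Altivec) registers; with
   only ISA 2.06 VSX it falls back to FLOAT_REGS. */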
3301 if (TARGET_HARD_FLOAT)
3302 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3304 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3305 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3307 if (TARGET_VSX)
3309 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3310 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3311 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3312 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3313 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3314 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3315 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3318 /* Add conditional constraints based on various options, to allow us to
3319 collapse multiple insn patterns. */
3320 if (TARGET_ALTIVEC)
3321 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3323 if (TARGET_MFPGPR) /* DFmode */
3324 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3326 if (TARGET_LFIWAX)
3327 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3329 if (TARGET_DIRECT_MOVE)
3331 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3332 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3333 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3334 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3335 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3336 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3339 if (TARGET_POWERPC64)
3341 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3342 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3345 if (TARGET_P8_VECTOR) /* SFmode */
3347 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3348 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3349 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3351 else if (TARGET_VSX)
3352 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3354 if (TARGET_STFIWX)
3355 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3357 if (TARGET_LFIWZX)
3358 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3360 if (TARGET_FLOAT128_TYPE)
3362 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3363 if (FLOAT128_IEEE_P (TFmode))
3364 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3367 if (TARGET_P9_VECTOR)
3369 /* Support for new D-form instructions. */
3370 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3372 /* Support for ISA 3.0 (power9) vectors. */
3373 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3376 /* Support for new direct moves (ISA 3.0 + 64bit). */
3377 if (TARGET_DIRECT_MOVE_128)
3378 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3380 /* Support small integers in VSX registers. */
3381 if (TARGET_P8_VECTOR)
3383 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3384 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3385 if (TARGET_P9_VECTOR)
3387 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3388 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3392 /* Set up the reload helper and direct move functions. */
3393 if (TARGET_VSX || TARGET_ALTIVEC)
3395 if (TARGET_64BIT)
3397 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3398 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3399 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3400 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3401 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3402 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3403 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3404 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3405 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3406 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3407 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3408 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3409 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3410 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3411 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3412 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3413 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3414 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3415 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3416 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3418 if (FLOAT128_VECTOR_P (KFmode))
3420 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3421 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3424 if (FLOAT128_VECTOR_P (TFmode))
3426 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3427 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3430 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3431 available. */
3432 if (TARGET_NO_SDMODE_STACK)
3434 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3435 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3438 if (TARGET_VSX)
3440 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3441 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3444 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3446 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3447 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3448 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3449 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3450 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3451 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3452 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3453 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3454 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3456 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3457 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3458 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3459 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3460 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3461 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3462 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3463 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3464 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3466 if (FLOAT128_VECTOR_P (KFmode))
3468 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3469 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3472 if (FLOAT128_VECTOR_P (TFmode))
3474 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3475 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
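/* Naming note for the handlers above: the reload_gpr_vsx entries expand the
   "gpr_from_vsx" patterns (moving a value out of a VSX register into GPRs),
   while the reload_vsx_gpr entries expand "vsx_from_gpr" (GPRs into a VSX
   register).  */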
3479 else
3481 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3482 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3483 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3484 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3485 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3486 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3487 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3488 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3489 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3490 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3491 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3492 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3493 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3494 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3495 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3496 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3497 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3498 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3499 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3500 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3502 if (FLOAT128_VECTOR_P (KFmode))
3504 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3505 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3508 if (FLOAT128_IEEE_P (TFmode))
3510 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3511 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3514 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3515 available. */
3516 if (TARGET_NO_SDMODE_STACK)
3518 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3519 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3522 if (TARGET_VSX)
3524 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3525 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3528 if (TARGET_DIRECT_MOVE)
3530 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3531 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3532 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3536 reg_addr[DFmode].scalar_in_vmx_p = true;
3537 reg_addr[DImode].scalar_in_vmx_p = true;
3539 if (TARGET_P8_VECTOR)
3541 reg_addr[SFmode].scalar_in_vmx_p = true;
3542 reg_addr[SImode].scalar_in_vmx_p = true;
3544 if (TARGET_P9_VECTOR)
3546 reg_addr[HImode].scalar_in_vmx_p = true;
3547 reg_addr[QImode].scalar_in_vmx_p = true;
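/* Summary of the scalar_in_vmx_p settings above: the flag records, per mode,
   that a scalar of that mode may usefully live in an Altivec register --
   DFmode/DImode whenever the VSX/Altivec reload support is set up,
   SFmode/SImode from ISA 2.07 (power8 vector), and HImode/QImode from
   ISA 3.0 (power9 vector).  */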
3552 /* Setup the fusion operations. */
3553 if (TARGET_P8_FUSION)
3555 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3556 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3557 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3558 if (TARGET_64BIT)
3559 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3562 if (TARGET_P9_FUSION)
3564 struct fuse_insns {
3565 enum machine_mode mode; /* mode of the fused type. */
3566 enum machine_mode pmode; /* pointer mode. */
3567 enum rs6000_reload_reg_type rtype; /* register type. */
3568 enum insn_code load; /* load insn. */
3569 enum insn_code store; /* store insn. */
3572 static const struct fuse_insns addis_insns[] = {
3573 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3574 CODE_FOR_fusion_vsx_di_sf_load,
3575 CODE_FOR_fusion_vsx_di_sf_store },
3577 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3578 CODE_FOR_fusion_vsx_si_sf_load,
3579 CODE_FOR_fusion_vsx_si_sf_store },
3581 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3582 CODE_FOR_fusion_vsx_di_df_load,
3583 CODE_FOR_fusion_vsx_di_df_store },
3585 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3586 CODE_FOR_fusion_vsx_si_df_load,
3587 CODE_FOR_fusion_vsx_si_df_store },
3589 { E_DImode, E_DImode, RELOAD_REG_FPR,
3590 CODE_FOR_fusion_vsx_di_di_load,
3591 CODE_FOR_fusion_vsx_di_di_store },
3593 { E_DImode, E_SImode, RELOAD_REG_FPR,
3594 CODE_FOR_fusion_vsx_si_di_load,
3595 CODE_FOR_fusion_vsx_si_di_store },
3597 { E_QImode, E_DImode, RELOAD_REG_GPR,
3598 CODE_FOR_fusion_gpr_di_qi_load,
3599 CODE_FOR_fusion_gpr_di_qi_store },
3601 { E_QImode, E_SImode, RELOAD_REG_GPR,
3602 CODE_FOR_fusion_gpr_si_qi_load,
3603 CODE_FOR_fusion_gpr_si_qi_store },
3605 { E_HImode, E_DImode, RELOAD_REG_GPR,
3606 CODE_FOR_fusion_gpr_di_hi_load,
3607 CODE_FOR_fusion_gpr_di_hi_store },
3609 { E_HImode, E_SImode, RELOAD_REG_GPR,
3610 CODE_FOR_fusion_gpr_si_hi_load,
3611 CODE_FOR_fusion_gpr_si_hi_store },
3613 { E_SImode, E_DImode, RELOAD_REG_GPR,
3614 CODE_FOR_fusion_gpr_di_si_load,
3615 CODE_FOR_fusion_gpr_di_si_store },
3617 { E_SImode, E_SImode, RELOAD_REG_GPR,
3618 CODE_FOR_fusion_gpr_si_si_load,
3619 CODE_FOR_fusion_gpr_si_si_store },
3621 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3622 CODE_FOR_fusion_gpr_di_sf_load,
3623 CODE_FOR_fusion_gpr_di_sf_store },
3625 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3626 CODE_FOR_fusion_gpr_si_sf_load,
3627 CODE_FOR_fusion_gpr_si_sf_store },
3629 { E_DImode, E_DImode, RELOAD_REG_GPR,
3630 CODE_FOR_fusion_gpr_di_di_load,
3631 CODE_FOR_fusion_gpr_di_di_store },
3633 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3634 CODE_FOR_fusion_gpr_di_df_load,
3635 CODE_FOR_fusion_gpr_di_df_store },
3638 machine_mode cur_pmode = Pmode;
3639 size_t i;
3641 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3643 machine_mode xmode = addis_insns[i].mode;
3644 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3646 if (addis_insns[i].pmode != cur_pmode)
3647 continue;
3649 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3650 continue;
3652 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3653 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3655 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3657 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3658 = addis_insns[i].load;
3659 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3660 = addis_insns[i].store;
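/* Worked example of the table walk above (illustrative): on a 64-bit target
   (Pmode == DImode) with hard float, the { E_SFmode, E_DImode,
   RELOAD_REG_FPR } row installs CODE_FOR_fusion_vsx_di_sf_load/_store as the
   fused addis+memory handlers for SFmode in FP registers, and under
   TARGET_P9_VECTOR the same handlers are reused for RELOAD_REG_VMX; all rows
   whose pmode is SImode are skipped because they do not match Pmode.  */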
3665 /* Note which types support fusing a TOC setup with a memory insn. We only
3666 generate fused TOCs for the medium/large code models. */
3667 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3668 && (TARGET_CMODEL != CMODEL_SMALL))
3670 reg_addr[QImode].fused_toc = true;
3671 reg_addr[HImode].fused_toc = true;
3672 reg_addr[SImode].fused_toc = true;
3673 reg_addr[DImode].fused_toc = true;
3674 if (TARGET_HARD_FLOAT)
3676 if (TARGET_SINGLE_FLOAT)
3677 reg_addr[SFmode].fused_toc = true;
3678 if (TARGET_DOUBLE_FLOAT)
3679 reg_addr[DFmode].fused_toc = true;
3683 /* Precalculate HARD_REGNO_NREGS. */
3684 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3685 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3686 rs6000_hard_regno_nregs[m][r]
3687 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3689 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3690 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3691 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3692 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3693 rs6000_hard_regno_mode_ok_p[m][r] = true;
3695 /* Precalculate CLASS_MAX_NREGS sizes. */
3696 for (c = 0; c < LIM_REG_CLASSES; ++c)
3698 int reg_size;
3700 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3701 reg_size = UNITS_PER_VSX_WORD;
3703 else if (c == ALTIVEC_REGS)
3704 reg_size = UNITS_PER_ALTIVEC_WORD;
3706 else if (c == FLOAT_REGS)
3707 reg_size = UNITS_PER_FP_WORD;
3709 else
3710 reg_size = UNITS_PER_WORD;
3712 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3714 machine_mode m2 = (machine_mode)m;
3715 int reg_size2 = reg_size;
3717 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3718 in VSX. */
3719 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3720 reg_size2 = UNITS_PER_FP_WORD;
3722 rs6000_class_max_nregs[m][c]
3723 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
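/* Worked examples of the calculation above: without VSX, a 16-byte V2DFmode
   value in FLOAT_REGS needs (16 + 8 - 1) / 8 = 2 registers, while with VSX
   any VSX register class holds it in one 16-byte register.  TDmode is also
   16 bytes but is FLOAT128_2REG_P, so even under VSX reg_size2 falls back to
   UNITS_PER_FP_WORD and it still occupies 2 registers.  */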
3727 /* Calculate the modes for which to automatically generate code using the
3728 reciprocal divide and square root instructions. In the future, we may
3729 automatically generate the instructions even if the user did not specify
3730 -mrecip; the older machines' double precision reciprocal sqrt estimate is
3731 not accurate enough. */
3732 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3733 if (TARGET_FRES)
3734 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3735 if (TARGET_FRE)
3736 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3737 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3738 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3739 if (VECTOR_UNIT_VSX_P (V2DFmode))
3740 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3742 if (TARGET_FRSQRTES)
3743 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3744 if (TARGET_FRSQRTE)
3745 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3746 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3747 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3748 if (VECTOR_UNIT_VSX_P (V2DFmode))
3749 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3751 if (rs6000_recip_control)
3753 if (!flag_finite_math_only)
3754 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3755 "-ffast-math");
3756 if (flag_trapping_math)
3757 warning (0, "%qs requires %qs or %qs", "-mrecip",
3758 "-fno-trapping-math", "-ffast-math");
3759 if (!flag_reciprocal_math)
3760 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3761 "-ffast-math");
3762 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3764 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3765 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3766 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3768 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3769 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3770 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3772 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3773 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3774 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3776 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3777 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3778 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3780 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3781 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3782 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3784 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3785 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3786 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3788 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3789 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3790 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3792 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3793 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3794 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
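/* Sketch of how these bits combine: on a cpu with fres/frsqrtes and -mrecip
   enabled under -ffast-math, SFmode ends up with both HAVE_RE and
   HAVE_RSQRTE plus the matching AUTO_* bits, so both x/y and 1/sqrt(x) may
   be expanded with the estimate instructions refined by Newton-Raphson
   iterations instead of the full divide/sqrt.  */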
3798 /* Update the addr mask bits in reg_addr to help secondary reload and the
3799 legitimate address support figure out the appropriate addressing to
3800 use. */
3801 rs6000_setup_reg_addr_masks ();
3803 if (global_init_p || TARGET_DEBUG_TARGET)
3805 if (TARGET_DEBUG_REG)
3806 rs6000_debug_reg_global ();
3808 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3809 fprintf (stderr,
3810 "SImode variable mult cost = %d\n"
3811 "SImode constant mult cost = %d\n"
3812 "SImode short constant mult cost = %d\n"
3813 "DImode multipliciation cost = %d\n"
3814 "SImode division cost = %d\n"
3815 "DImode division cost = %d\n"
3816 "Simple fp operation cost = %d\n"
3817 "DFmode multiplication cost = %d\n"
3818 "SFmode division cost = %d\n"
3819 "DFmode division cost = %d\n"
3820 "cache line size = %d\n"
3821 "l1 cache size = %d\n"
3822 "l2 cache size = %d\n"
3823 "simultaneous prefetches = %d\n"
3824 "\n",
3825 rs6000_cost->mulsi,
3826 rs6000_cost->mulsi_const,
3827 rs6000_cost->mulsi_const9,
3828 rs6000_cost->muldi,
3829 rs6000_cost->divsi,
3830 rs6000_cost->divdi,
3831 rs6000_cost->fp,
3832 rs6000_cost->dmul,
3833 rs6000_cost->sdiv,
3834 rs6000_cost->ddiv,
3835 rs6000_cost->cache_line_size,
3836 rs6000_cost->l1_cache_size,
3837 rs6000_cost->l2_cache_size,
3838 rs6000_cost->simultaneous_prefetches);
3842 #if TARGET_MACHO
3843 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3845 static void
3846 darwin_rs6000_override_options (void)
3848 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3849 off. */
3850 rs6000_altivec_abi = 1;
3851 TARGET_ALTIVEC_VRSAVE = 1;
3852 rs6000_current_abi = ABI_DARWIN;
3854 if (DEFAULT_ABI == ABI_DARWIN
3855 && TARGET_64BIT)
3856 darwin_one_byte_bool = 1;
3858 if (TARGET_64BIT && ! TARGET_POWERPC64)
3860 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3861 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3863 if (flag_mkernel)
3865 rs6000_default_long_calls = 1;
3866 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3869 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3870 Altivec. */
3871 if (!flag_mkernel && !flag_apple_kext
3872 && TARGET_64BIT
3873 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3874 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3876 /* Unless the user (not the configurer) has explicitly overridden
3877 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3878 G4 unless targeting the kernel. */
3879 if (!flag_mkernel
3880 && !flag_apple_kext
3881 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3882 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3883 && ! global_options_set.x_rs6000_cpu_index)
3885 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3888 #endif
3890 /* If not otherwise specified by a target, make 'long double' equivalent to
3891 'double'. */
3893 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3894 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3895 #endif
3897 /* Return the builtin mask of the various options that could affect which
3898 builtins are available. In the past we used target_flags, but we've run out
3899 of bits, and some options like PAIRED are no longer in target_flags. */
3901 HOST_WIDE_INT
3902 rs6000_builtin_mask_calculate (void)
3904 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3905 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3906 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3907 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3908 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3909 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3910 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3911 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3912 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3913 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3914 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3915 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3916 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3917 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3918 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3919 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3920 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3921 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3922 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3923 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3924 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3925 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
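/* As a rough illustration (not an exhaustive list): a power8-class target
   would typically produce a mask containing RS6000_BTM_ALTIVEC,
   RS6000_BTM_VSX, RS6000_BTM_P8_VECTOR, RS6000_BTM_CRYPTO and
   RS6000_BTM_HTM; the builtin expansion code compares this mask against each
   builtin's required mask to decide whether the builtin is usable.  */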
3928 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3929 to clobber the XER[CA] bit because clobbering that bit without telling
3930 the compiler worked just fine with versions of GCC before GCC 5, and
3931 breaking a lot of older code in ways that are hard to track down is
3932 not such a great idea. */
3934 static rtx_insn *
3935 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3936 vec<const char *> &/*constraints*/,
3937 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3939 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3940 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3941 return NULL;
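/* An illustrative sketch (hypothetical user code, not part of this file):
   pre-GCC-5 inline asm such as the following modifies XER[CA] via
   subfc/subfe without declaring it, and keeps working only because of the
   implicit clobber pushed above.  */
#if 0
static inline long
borrows_p (long a, long b)
{
  long t;
  /* subfc sets CA; subfe then consumes it -- neither effect is listed.  */
  __asm__ ("subfc %0,%2,%1\n\tsubfe %0,%0,%0"
	   : "=&r" (t) : "r" (a), "r" (b));
  return t;	/* 0 if a >= b (unsigned), -1 on borrow.  */
}
#endif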
3944 /* Override command line options.
3946 Combine build-specific configuration information with options
3947 specified on the command line to set various state variables which
3948 influence code generation, optimization, and expansion of built-in
3949 functions. Assure that command-line configuration preferences are
3950 compatible with each other and with the build configuration; issue
3951 warnings while adjusting configuration or error messages while
3952 rejecting configuration.
3954 Upon entry to this function:
3956 This function is called once at the beginning of
3957 compilation, and then again at the start and end of compiling
3958 each section of code that has a different configuration, as
3959 indicated, for example, by adding the
3961 __attribute__((__target__("cpu=power9")))
3963 qualifier to a function definition or, for example, by bracketing
3964 code between
3966 #pragma GCC target("altivec")
3970 #pragma GCC reset_options
3972 directives. Parameter global_init_p is true for the initial
3973 invocation, which initializes global variables, and false for all
3974 subsequent invocations.
3977 Various global state information is assumed to be valid. This
3978 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3979 default CPU specified at build configure time, TARGET_DEFAULT,
3980 representing the default set of option flags for the default
3981 target, and global_options_set.x_rs6000_isa_flags, representing
3982 which options were requested on the command line.
3984 Upon return from this function:
3986 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3987 was set by name on the command line. Additionally, if certain
3988 attributes are automatically enabled or disabled by this function
3989 in order to assure compatibility between options and
3990 configuration, the flags associated with those attributes are
3991 also set. By setting these "explicit bits", we avoid the risk
3992 that other code might accidentally overwrite these particular
3993 attributes with "default values".
3995 The various bits of rs6000_isa_flags are set to indicate the
3996 target options that have been selected for the most current
3997 compilation efforts. This has the effect of also turning on the
3998 associated TARGET_XXX values since these are macros which are
3999 generally defined to test the corresponding bit of the
4000 rs6000_isa_flags variable.
4002 The variable rs6000_builtin_mask is set to represent the target
4003 options for the most current compilation efforts, consistent with
4004 the current contents of rs6000_isa_flags. This variable controls
4005 expansion of built-in functions.
4007 Various other global variables and fields of global structures
4008 (over 50 in all) are initialized to reflect the desired options
4009 for the most current compilation efforts. */
4011 static bool
4012 rs6000_option_override_internal (bool global_init_p)
4014 bool ret = true;
4016 HOST_WIDE_INT set_masks;
4017 HOST_WIDE_INT ignore_masks;
4018 int cpu_index = -1;
4019 int tune_index;
4020 struct cl_target_option *main_target_opt
4021 = ((global_init_p || target_option_default_node == NULL)
4022 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4024 /* Print defaults. */
4025 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4026 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4028 /* Remember the explicit arguments. */
4029 if (global_init_p)
4030 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4032 /* We plan to deprecate the -maltivec=be option. For now, just
4033 issue a warning message. */
4034 if (global_init_p
4035 && rs6000_altivec_element_order == 2)
4036 warning (0, "%qs command-line option is deprecated",
4037 "-maltivec=be");
4039 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4040 library functions, so warn about it. The flag may be useful for
4041 performance studies from time to time though, so don't disable it
4042 entirely. */
4043 if (global_options_set.x_rs6000_alignment_flags
4044 && rs6000_alignment_flags == MASK_ALIGN_POWER
4045 && DEFAULT_ABI == ABI_DARWIN
4046 && TARGET_64BIT)
4047 warning (0, "%qs is not supported for 64-bit Darwin;"
4048 " it is incompatible with the installed C and C++ libraries",
4049 "-malign-power");
4051 /* Numerous experiments show that IRA-based loop pressure
4052 calculation works better for RTL loop invariant motion on targets
4053 with enough (>= 32) registers. It is an expensive optimization,
4054 so it is only enabled when optimizing for peak performance. */
4055 if (optimize >= 3 && global_init_p
4056 && !global_options_set.x_flag_ira_loop_pressure)
4057 flag_ira_loop_pressure = 1;
4059 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4060 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4061 options were already specified. */
4062 if (flag_sanitize & SANITIZE_USER_ADDRESS
4063 && !global_options_set.x_flag_asynchronous_unwind_tables)
4064 flag_asynchronous_unwind_tables = 1;
4066 /* Set the pointer size. */
4067 if (TARGET_64BIT)
4069 rs6000_pmode = DImode;
4070 rs6000_pointer_size = 64;
4072 else
4074 rs6000_pmode = SImode;
4075 rs6000_pointer_size = 32;
4078 /* Some OSs don't support saving the high part of 64-bit registers on context
4079 switch. Other OSs don't support saving Altivec registers. On those OSs,
4080 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4081 if the user wants either, the user must explicitly specify them and we
4082 won't interfere with the user's specification. */
4084 set_masks = POWERPC_MASKS;
4085 #ifdef OS_MISSING_POWERPC64
4086 if (OS_MISSING_POWERPC64)
4087 set_masks &= ~OPTION_MASK_POWERPC64;
4088 #endif
4089 #ifdef OS_MISSING_ALTIVEC
4090 if (OS_MISSING_ALTIVEC)
4091 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4092 | OTHER_VSX_VECTOR_MASKS);
4093 #endif
4095 /* Don't let the processor default override options given explicitly. */
4096 set_masks &= ~rs6000_isa_flags_explicit;
4098 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4099 the cpu in a target attribute or pragma, but did not specify a tuning
4100 option, use the cpu for the tuning option rather than the option specified
4101 with -mtune on the command line. Process a '--with-cpu' configuration
4102 request as an implicit --cpu. */
4103 if (rs6000_cpu_index >= 0)
4104 cpu_index = rs6000_cpu_index;
4105 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4106 cpu_index = main_target_opt->x_rs6000_cpu_index;
4107 else if (OPTION_TARGET_CPU_DEFAULT)
4108 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4110 if (cpu_index >= 0)
4112 const char *unavailable_cpu = NULL;
4113 switch (processor_target_table[cpu_index].processor)
4115 #ifndef HAVE_AS_POWER9
4116 case PROCESSOR_POWER9:
4117 unavailable_cpu = "power9";
4118 break;
4119 #endif
4120 #ifndef HAVE_AS_POWER8
4121 case PROCESSOR_POWER8:
4122 unavailable_cpu = "power8";
4123 break;
4124 #endif
4125 #ifndef HAVE_AS_POPCNTD
4126 case PROCESSOR_POWER7:
4127 unavailable_cpu = "power7";
4128 break;
4129 #endif
4130 #ifndef HAVE_AS_DFP
4131 case PROCESSOR_POWER6:
4132 unavailable_cpu = "power6";
4133 break;
4134 #endif
4135 #ifndef HAVE_AS_POPCNTB
4136 case PROCESSOR_POWER5:
4137 unavailable_cpu = "power5";
4138 break;
4139 #endif
4140 default:
4141 break;
4143 if (unavailable_cpu)
4145 cpu_index = -1;
4146 warning (0, "will not generate %qs instructions because "
4147 "assembler lacks %qs support", unavailable_cpu,
4148 unavailable_cpu);
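/* E.g. a compiler configured against an assembler too old to define
   HAVE_AS_POWER9 demotes -mcpu=power9 here: cpu_index is reset to -1, a
   single warning is issued, and the code below falls back to TARGET_DEFAULT
   or the generic default cpu.  */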
4152 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4153 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4154 with those from the cpu, except for options that were explicitly set. If
4155 we don't have a cpu, do not override the target bits set in
4156 TARGET_DEFAULT. */
4157 if (cpu_index >= 0)
4159 rs6000_cpu_index = cpu_index;
4160 rs6000_isa_flags &= ~set_masks;
4161 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4162 & set_masks);
4164 else
4166 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4167 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4168 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4169 to using rs6000_isa_flags, we need to do the initialization here.
4171 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4172 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4173 HOST_WIDE_INT flags;
4174 if (TARGET_DEFAULT)
4175 flags = TARGET_DEFAULT;
4176 else
4178 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4179 const char *default_cpu = (!TARGET_POWERPC64
4180 ? "powerpc"
4181 : (BYTES_BIG_ENDIAN
4182 ? "powerpc64"
4183 : "powerpc64le"));
4184 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4185 flags = processor_target_table[default_cpu_index].target_enable;
4187 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
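/* Example of the precedence above (illustrative): with -mcpu=power8
   -mno-vsx, OPTION_MASK_VSX is recorded in rs6000_isa_flags_explicit and
   therefore removed from set_masks, so the power8 entry in
   processor_target_table enables everything else but cannot re-enable
   VSX.  */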
4190 if (rs6000_tune_index >= 0)
4191 tune_index = rs6000_tune_index;
4192 else if (cpu_index >= 0)
4193 rs6000_tune_index = tune_index = cpu_index;
4194 else
4196 size_t i;
4197 enum processor_type tune_proc
4198 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4200 tune_index = -1;
4201 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4202 if (processor_target_table[i].processor == tune_proc)
4204 tune_index = i;
4205 break;
4209 if (cpu_index >= 0)
4210 rs6000_cpu = processor_target_table[cpu_index].processor;
4211 else
4212 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
4214 gcc_assert (tune_index >= 0);
4215 rs6000_tune = processor_target_table[tune_index].processor;
4217 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4218 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4219 || rs6000_cpu == PROCESSOR_PPCE5500)
4221 if (TARGET_ALTIVEC)
4222 error ("AltiVec not supported in this target");
4225 /* If we are optimizing big endian systems for space, use the load/store
4226 multiple instructions. */
4227 if (BYTES_BIG_ENDIAN && optimize_size)
4228 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
4230 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
4231 because the hardware doesn't support the instructions used in little
4232 endian mode, and causes an alignment trap. The 750 does not cause an
4233 alignment trap (except when the target is unaligned). */
4235 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
4237 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4238 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4239 warning (0, "%qs is not supported on little endian systems",
4240 "-mmultiple");
4243 /* If little-endian, default to -mstrict-align on older processors.
4244 Testing for htm matches power8 and later. */
4245 if (!BYTES_BIG_ENDIAN
4246 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4247 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4249 /* -maltivec={le,be} implies -maltivec. */
4250 if (rs6000_altivec_element_order != 0)
4251 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4253 /* Disallow -maltivec=le in big endian mode for now. This is not
4254 known to be useful for anyone. */
4255 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4257 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4258 rs6000_altivec_element_order = 0;
4261 if (!rs6000_fold_gimple)
4262 fprintf (stderr,
4263 "gimple folding of rs6000 builtins has been disabled.\n");
4265 /* Add some warnings for VSX. */
4266 if (TARGET_VSX)
4268 const char *msg = NULL;
4269 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4271 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4272 msg = N_("-mvsx requires hardware floating point");
4273 else
4275 rs6000_isa_flags &= ~OPTION_MASK_VSX;
4276 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4279 else if (TARGET_PAIRED_FLOAT)
4280 msg = N_("-mvsx and -mpaired are incompatible");
4281 else if (TARGET_AVOID_XFORM > 0)
4282 msg = N_("-mvsx needs indexed addressing");
4283 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4284 & OPTION_MASK_ALTIVEC))
4286 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4287 msg = N_("-mvsx and -mno-altivec are incompatible");
4288 else
4289 msg = N_("-mno-altivec disables vsx");
4292 if (msg)
4294 warning (0, msg);
4295 rs6000_isa_flags &= ~OPTION_MASK_VSX;
4296 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4300 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4301 the -mcpu setting to enable options that conflict. */
4302 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4303 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4304 | OPTION_MASK_ALTIVEC
4305 | OPTION_MASK_VSX)) != 0)
4306 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4307 | OPTION_MASK_DIRECT_MOVE)
4308 & ~rs6000_isa_flags_explicit);
4310 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4311 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4313 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4314 off all of the options that depend on those flags. */
4315 ignore_masks = rs6000_disable_incompatible_switches ();
4317 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4318 unless the user explicitly used the -mno-<option> to disable the code. */
4319 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4320 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4321 else if (TARGET_P9_MINMAX)
4323 if (cpu_index >= 0)
4325 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4327 /* legacy behavior: allow -mcpu=power9 with certain
4328 capabilities explicitly disabled. */
4329 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4331 else
4332 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4333 "for <xxx> less than power9", "-mcpu");
4335 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4336 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4337 & rs6000_isa_flags_explicit))
4338 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4339 were explicitly cleared. */
4340 error ("%qs incompatible with explicitly disabled options",
4341 "-mpower9-minmax");
4342 else
4343 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4345 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4346 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4347 else if (TARGET_VSX)
4348 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4349 else if (TARGET_POPCNTD)
4350 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4351 else if (TARGET_DFP)
4352 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4353 else if (TARGET_CMPB)
4354 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4355 else if (TARGET_FPRND)
4356 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4357 else if (TARGET_POPCNTB)
4358 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4359 else if (TARGET_ALTIVEC)
4360 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
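/* For example (the exact mask contents live in rs6000-cpus.def): a lone
   -mvsx lands in the TARGET_VSX arm and ORs in ISA_2_6_MASKS_SERVER, which
   pulls in the older server options such as -maltivec and -mpopcntd unless
   the user explicitly disabled them (those bits are in ignore_masks).  */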
4362 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4364 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4365 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4366 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4369 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4371 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4372 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4373 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4376 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4378 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4379 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4380 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4383 if (TARGET_P8_VECTOR && !TARGET_VSX)
4385 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4386 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4387 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4388 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4390 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4391 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4392 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4394 else
4396 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4397 not explicit. */
4398 rs6000_isa_flags |= OPTION_MASK_VSX;
4399 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4403 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4405 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4406 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4407 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4410 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4411 silently turn off quad memory mode. */
4412 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4414 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4415 warning (0, N_("-mquad-memory requires 64-bit mode"));
4417 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4418 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4420 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4421 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4424 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4425 the words are reversed, but atomic operations can still be done by
4426 swapping the words. */
4427 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4429 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4430 warning (0, N_("-mquad-memory is not available in little endian "
4431 "mode"));
4433 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4436 /* Assume that if the user asked for normal quad memory instructions, they
4437 want the atomic versions as well, unless they explicitly told us not to use
4438 quad word atomic instructions. */
4439 if (TARGET_QUAD_MEMORY
4440 && !TARGET_QUAD_MEMORY_ATOMIC
4441 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4442 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4444 /* If we can shrink-wrap the TOC register save separately, then use
4445 -msave-toc-indirect unless explicitly disabled. */
4446 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4447 && flag_shrink_wrap_separate
4448 && optimize_function_for_speed_p (cfun))
4449 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4451 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4452 generating power8 instructions. */
4453 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4454 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4455 & OPTION_MASK_P8_FUSION);
4457 /* Setting additional fusion flags turns on base fusion. */
4458 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4460 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4462 if (TARGET_P8_FUSION_SIGN)
4463 error ("%qs requires %qs", "-mpower8-fusion-sign",
4464 "-mpower8-fusion");
4466 if (TARGET_TOC_FUSION)
4467 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4469 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4471 else
4472 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4475 /* Power9 fusion is a superset of power8 fusion. */
4476 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4478 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4480 /* We prefer to not mention undocumented options in
4481 error messages. However, if users have managed to select
4482 power9-fusion without selecting power8-fusion, they
4483 already know about undocumented flags. */
4484 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4485 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4487 else
4488 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4491 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4492 generating power9 instructions. */
4493 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4494 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4495 & OPTION_MASK_P9_FUSION);
4497 /* Power8 does not fuse sign-extended loads with the addis. If we are
4498 optimizing at high levels for speed, convert a sign-extended load into a
4499 zero-extending load plus an explicit sign extension. */
4500 if (TARGET_P8_FUSION
4501 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4502 && optimize_function_for_speed_p (cfun)
4503 && optimize >= 3)
4504 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4506 /* TOC fusion requires 64-bit mode and a medium/large code model. */
4507 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4509 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4510 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4511 warning (0, N_("-mtoc-fusion requires 64-bit"));
4514 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4516 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4517 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4518 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4521 /* Turn on -mtoc-fusion by default if p8-fusion is enabled and we have a
4522 64-bit medium/large code model. */
4523 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4524 && (TARGET_CMODEL != CMODEL_SMALL)
4525 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4526 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4528 /* ISA 3.0 vector instructions include ISA 2.07. */
4529 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4531 /* We prefer to not mention undocumented options in
4532 error messages. However, if users have managed to select
4533 power9-vector without selecting power8-vector, they
4534 already know about undocumented flags. */
4535 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4536 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4537 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4538 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4540 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4541 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4542 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4544 else
4546 /* OPTION_MASK_P9_VECTOR is explicit and
4547 OPTION_MASK_P8_VECTOR is not explicit. */
4548 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4549 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4553 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4554 support. If we only have ISA 2.06 support, and the user did not specify
4555 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4556 but we don't enable the full vectorization support. */
4557 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4558 TARGET_ALLOW_MOVMISALIGN = 1;
4560 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4562 if (TARGET_ALLOW_MOVMISALIGN > 0
4563 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4564 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4566 TARGET_ALLOW_MOVMISALIGN = 0;
4569 /* Determine when unaligned vector accesses are permitted, and when
4570 they are preferred over masked Altivec loads. Note that if
4571 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4572 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4573 not true. */
4574 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4576 if (!TARGET_VSX)
4578 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4579 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4581 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4584 else if (!TARGET_ALLOW_MOVMISALIGN)
4586 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4587 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4588 "-mallow-movmisalign");
4590 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4594 /* Set long double size before the IEEE 128-bit tests. */
4595 if (!global_options_set.x_rs6000_long_double_type_size)
4597 if (main_target_opt != NULL
4598 && (main_target_opt->x_rs6000_long_double_type_size
4599 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4600 error ("target attribute or pragma changes long double size");
4601 else
4602 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4605 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4606 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4607 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4608 those systems will not pick up this default. Warn if the user changes the
4609 default unless either the user used the -Wno-psabi option, or the compiler
4610 was built to enable multilibs to switch between the two long double
4611 types. */
4612 if (!global_options_set.x_rs6000_ieeequad)
4613 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4615 else if (!TARGET_IEEEQUAD_MULTILIB
4616 && rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT
4617 && TARGET_LONG_DOUBLE_128)
4619 static bool warned_change_long_double;
4620 if (!warned_change_long_double)
4622 warned_change_long_double = true;
4623 if (TARGET_IEEEQUAD)
4624 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4625 else
4626 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4630 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4631 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4632 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4633 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4634 the keyword as well as the type. */
4635 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4637 /* IEEE 128-bit floating point requires VSX support. */
4638 if (TARGET_FLOAT128_KEYWORD)
4640 if (!TARGET_VSX)
4642 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4643 error ("%qs requires VSX support", "-mfloat128");
4645 TARGET_FLOAT128_TYPE = 0;
4646 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4647 | OPTION_MASK_FLOAT128_HW);
4649 else if (!TARGET_FLOAT128_TYPE)
4651 TARGET_FLOAT128_TYPE = 1;
4652 warning (0, "the -mfloat128 option may not be fully supported");
4656 /* Enable the __float128 keyword under Linux by default. */
4657 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4658 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4659 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4661 /* If we are supporting the float128 type and have full ISA 3.0 support,
4662 enable -mfloat128-hardware by default. However, don't enable the
4663 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4664 because sometimes the compiler wants to put things in an integer
4665 container, and if we don't have __int128 support, it is impossible. */
4666 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4667 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4668 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4669 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4671 if (TARGET_FLOAT128_HW
4672 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4674 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4675 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4677 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4680 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4682 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4683 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4685 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4688 /* Print the options after updating the defaults. */
4689 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4690 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4692 /* E500mc does "better" if we inline more aggressively. Respect the
4693 user's opinion, though. */
4694 if (rs6000_block_move_inline_limit == 0
4695 && (rs6000_tune == PROCESSOR_PPCE500MC
4696 || rs6000_tune == PROCESSOR_PPCE500MC64
4697 || rs6000_tune == PROCESSOR_PPCE5500
4698 || rs6000_tune == PROCESSOR_PPCE6500))
4699 rs6000_block_move_inline_limit = 128;
4701 /* store_one_arg depends on expand_block_move to handle at least the
4702 size of reg_parm_stack_space. */
4703 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4704 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4706 if (global_init_p)
4708 /* If the appropriate debug option is enabled, replace the target hooks
4709 with debug versions that call the real version and then print
4710 debugging information. */
4711 if (TARGET_DEBUG_COST)
4713 targetm.rtx_costs = rs6000_debug_rtx_costs;
4714 targetm.address_cost = rs6000_debug_address_cost;
4715 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4718 if (TARGET_DEBUG_ADDR)
4720 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4721 targetm.legitimize_address = rs6000_debug_legitimize_address;
4722 rs6000_secondary_reload_class_ptr
4723 = rs6000_debug_secondary_reload_class;
4724 targetm.secondary_memory_needed
4725 = rs6000_debug_secondary_memory_needed;
4726 targetm.can_change_mode_class
4727 = rs6000_debug_can_change_mode_class;
4728 rs6000_preferred_reload_class_ptr
4729 = rs6000_debug_preferred_reload_class;
4730 rs6000_legitimize_reload_address_ptr
4731 = rs6000_debug_legitimize_reload_address;
4732 rs6000_mode_dependent_address_ptr
4733 = rs6000_debug_mode_dependent_address;
4736 if (rs6000_veclibabi_name)
4738 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4739 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4740 else
4742 error ("unknown vectorization library ABI type (%qs) for "
4743 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4744 ret = false;
4749 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4750 target attribute or pragma which automatically enables both options,
4751 unless the altivec ABI was set. This is set by default for 64-bit, but
4752 not for 32-bit. */
4753 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4755 TARGET_FLOAT128_TYPE = 0;
4756 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4757 | OPTION_MASK_FLOAT128_KEYWORD)
4758 & ~rs6000_isa_flags_explicit);
4761 /* Enable Altivec ABI for AIX -maltivec. */
4762 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4764 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4765 error ("target attribute or pragma changes AltiVec ABI");
4766 else
4767 rs6000_altivec_abi = 1;
4770 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4771 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4772 be explicitly overridden in either case. */
4773 if (TARGET_ELF)
4775 if (!global_options_set.x_rs6000_altivec_abi
4776 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4778 if (main_target_opt != NULL
4779 && !main_target_opt->x_rs6000_altivec_abi)
4780 error ("target attribute or pragma changes AltiVec ABI");
4781 else
4782 rs6000_altivec_abi = 1;
4786 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4787 So far, the only darwin64 targets are also MACH-O. */
4788 if (TARGET_MACHO
4789 && DEFAULT_ABI == ABI_DARWIN
4790 && TARGET_64BIT)
4792 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4793 error ("target attribute or pragma changes darwin64 ABI");
4794 else
4796 rs6000_darwin64_abi = 1;
4797 /* Default to natural alignment, for better performance. */
4798 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4802 /* Place FP constants in the constant pool instead of the TOC
4803 if section anchors are enabled. */
4804 if (flag_section_anchors
4805 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4806 TARGET_NO_FP_IN_TOC = 1;
4808 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4809 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4811 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4812 SUBTARGET_OVERRIDE_OPTIONS;
4813 #endif
4814 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4815 SUBSUBTARGET_OVERRIDE_OPTIONS;
4816 #endif
4817 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4818 SUB3TARGET_OVERRIDE_OPTIONS;
4819 #endif
4821 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4822 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4824 if (main_target_opt)
4826 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4827 error ("target attribute or pragma changes single precision floating "
4828 "point");
4829 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4830 error ("target attribute or pragma changes double precision floating "
4831 "point");
4834 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4835 && rs6000_tune != PROCESSOR_POWER5
4836 && rs6000_tune != PROCESSOR_POWER6
4837 && rs6000_tune != PROCESSOR_POWER7
4838 && rs6000_tune != PROCESSOR_POWER8
4839 && rs6000_tune != PROCESSOR_POWER9
4840 && rs6000_tune != PROCESSOR_PPCA2
4841 && rs6000_tune != PROCESSOR_CELL
4842 && rs6000_tune != PROCESSOR_PPC476);
4843 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4844 || rs6000_tune == PROCESSOR_POWER5
4845 || rs6000_tune == PROCESSOR_POWER7
4846 || rs6000_tune == PROCESSOR_POWER8);
4847 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4848 || rs6000_tune == PROCESSOR_POWER5
4849 || rs6000_tune == PROCESSOR_POWER6
4850 || rs6000_tune == PROCESSOR_POWER7
4851 || rs6000_tune == PROCESSOR_POWER8
4852 || rs6000_tune == PROCESSOR_POWER9
4853 || rs6000_tune == PROCESSOR_PPCE500MC
4854 || rs6000_tune == PROCESSOR_PPCE500MC64
4855 || rs6000_tune == PROCESSOR_PPCE5500
4856 || rs6000_tune == PROCESSOR_PPCE6500);
4858 /* Allow debug switches to override the above settings. These are set to -1
4859 in rs6000.opt to indicate the user hasn't directly set the switch. */
4860 if (TARGET_ALWAYS_HINT >= 0)
4861 rs6000_always_hint = TARGET_ALWAYS_HINT;
4863 if (TARGET_SCHED_GROUPS >= 0)
4864 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4866 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4867 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4869 rs6000_sched_restricted_insns_priority
4870 = (rs6000_sched_groups ? 1 : 0);
4872 /* Handle -msched-costly-dep option. */
4873 rs6000_sched_costly_dep
4874 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4876 if (rs6000_sched_costly_dep_str)
4878 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4879 rs6000_sched_costly_dep = no_dep_costly;
4880 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4881 rs6000_sched_costly_dep = all_deps_costly;
4882 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4883 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4884 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4885 rs6000_sched_costly_dep = store_to_load_dep_costly;
4886 else
4887 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4888 atoi (rs6000_sched_costly_dep_str));
4891 /* Handle -minsert-sched-nops option. */
4892 rs6000_sched_insert_nops
4893 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4895 if (rs6000_sched_insert_nops_str)
4897 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4898 rs6000_sched_insert_nops = sched_finish_none;
4899 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4900 rs6000_sched_insert_nops = sched_finish_pad_groups;
4901 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4902 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4903 else
4904 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4905 atoi (rs6000_sched_insert_nops_str));
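/* For example, -minsert-sched-nops=pad selects sched_finish_pad_groups,
   while any other string falls through to atoi, so a numeric argument is
   interpreted directly as an rs6000_nop_insertion value.  */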
4908 /* Handle the stack protector options. */
4909 if (!global_options_set.x_rs6000_stack_protector_guard)
4910 #ifdef TARGET_THREAD_SSP_OFFSET
4911 rs6000_stack_protector_guard = SSP_TLS;
4912 #else
4913 rs6000_stack_protector_guard = SSP_GLOBAL;
4914 #endif
4916 #ifdef TARGET_THREAD_SSP_OFFSET
4917 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4918 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4919 #endif
4921 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4923 char *endp;
4924 const char *str = rs6000_stack_protector_guard_offset_str;
4926 errno = 0;
4927 long offset = strtol (str, &endp, 0);
4928 if (!*str || *endp || errno)
4929 error ("%qs is not a valid number in %qs", str,
4930 "-mstack-protector-guard-offset=");
4932 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4933 || (TARGET_64BIT && (offset & 3)))
4934 error ("%qs is not a valid offset in %qs", str,
4935 "-mstack-protector-guard-offset=");
4937 rs6000_stack_protector_guard_offset = offset;
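/* Example: -mstack-protector-guard-offset=0x7010 is accepted (strtol with
   base 0 also parses hex), while 0x8000 falls outside the signed 16-bit
   displacement range, and any offset that is not a multiple of 4 is
   rejected in 64-bit mode.  */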
4940 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4942 const char *str = rs6000_stack_protector_guard_reg_str;
4943 int reg = decode_reg_name (str);
4945 if (!IN_RANGE (reg, 1, 31))
4946 error ("%qs is not a valid base register in %qs", str,
4947 "-mstack-protector-guard-reg=");
4949 rs6000_stack_protector_guard_reg = reg;
4952 if (rs6000_stack_protector_guard == SSP_TLS
4953 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4954 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4956 if (global_init_p)
4958 #ifdef TARGET_REGNAMES
4959 /* If the user desires alternate register names, copy in the
4960 alternate names now. */
4961 if (TARGET_REGNAMES)
4962 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4963 #endif
4965 /* Set aix_struct_return last, after the ABI is determined.
4966 If -maix-struct-return or -msvr4-struct-return was explicitly
4967 used, don't override with the ABI default. */
4968 if (!global_options_set.x_aix_struct_return)
4969 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4971 #if 0
4972 /* IBM XL compiler defaults to unsigned bitfields. */
4973 if (TARGET_XL_COMPAT)
4974 flag_signed_bitfields = 0;
4975 #endif
4977 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4978 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4980 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4982 /* We can only guarantee the availability of DI pseudo-ops when
4983 assembling for 64-bit targets. */
4984 if (!TARGET_64BIT)
4986 targetm.asm_out.aligned_op.di = NULL;
4987 targetm.asm_out.unaligned_op.di = NULL;
4991 /* Set branch target alignment, if not optimizing for size. */
4992 if (!optimize_size)
4994 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4995 aligned 8-byte to avoid misprediction by the branch predictor. */
4996 if (rs6000_tune == PROCESSOR_TITAN
4997 || rs6000_tune == PROCESSOR_CELL)
4999 if (align_functions <= 0)
5000 align_functions = 8;
5001 if (align_jumps <= 0)
5002 align_jumps = 8;
5003 if (align_loops <= 0)
5004 align_loops = 8;
5006 if (rs6000_align_branch_targets)
5008 if (align_functions <= 0)
5009 align_functions = 16;
5010 if (align_jumps <= 0)
5011 align_jumps = 16;
5012 if (align_loops <= 0)
5014 can_override_loop_align = 1;
5015 align_loops = 16;
5018 if (align_jumps_max_skip <= 0)
5019 align_jumps_max_skip = 15;
5020 if (align_loops_max_skip <= 0)
5021 align_loops_max_skip = 15;
5024 /* Arrange to save and restore machine status around nested functions. */
5025 init_machine_status = rs6000_init_machine_status;
5027 /* We should always be splitting complex arguments, but we can't break
5028 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5029 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5030 targetm.calls.split_complex_arg = NULL;
5032 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5033 if (DEFAULT_ABI == ABI_AIX)
5034 targetm.calls.custom_function_descriptors = 0;
5037 /* Initialize rs6000_cost with the appropriate target costs. */
5038 if (optimize_size)
5039 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5040 else
5041 switch (rs6000_tune)
5043 case PROCESSOR_RS64A:
5044 rs6000_cost = &rs64a_cost;
5045 break;
5047 case PROCESSOR_MPCCORE:
5048 rs6000_cost = &mpccore_cost;
5049 break;
5051 case PROCESSOR_PPC403:
5052 rs6000_cost = &ppc403_cost;
5053 break;
5055 case PROCESSOR_PPC405:
5056 rs6000_cost = &ppc405_cost;
5057 break;
5059 case PROCESSOR_PPC440:
5060 rs6000_cost = &ppc440_cost;
5061 break;
5063 case PROCESSOR_PPC476:
5064 rs6000_cost = &ppc476_cost;
5065 break;
5067 case PROCESSOR_PPC601:
5068 rs6000_cost = &ppc601_cost;
5069 break;
5071 case PROCESSOR_PPC603:
5072 rs6000_cost = &ppc603_cost;
5073 break;
5075 case PROCESSOR_PPC604:
5076 rs6000_cost = &ppc604_cost;
5077 break;
5079 case PROCESSOR_PPC604e:
5080 rs6000_cost = &ppc604e_cost;
5081 break;
5083 case PROCESSOR_PPC620:
5084 rs6000_cost = &ppc620_cost;
5085 break;
5087 case PROCESSOR_PPC630:
5088 rs6000_cost = &ppc630_cost;
5089 break;
5091 case PROCESSOR_CELL:
5092 rs6000_cost = &ppccell_cost;
5093 break;
5095 case PROCESSOR_PPC750:
5096 case PROCESSOR_PPC7400:
5097 rs6000_cost = &ppc750_cost;
5098 break;
5100 case PROCESSOR_PPC7450:
5101 rs6000_cost = &ppc7450_cost;
5102 break;
5104 case PROCESSOR_PPC8540:
5105 case PROCESSOR_PPC8548:
5106 rs6000_cost = &ppc8540_cost;
5107 break;
5109 case PROCESSOR_PPCE300C2:
5110 case PROCESSOR_PPCE300C3:
5111 rs6000_cost = &ppce300c2c3_cost;
5112 break;
5114 case PROCESSOR_PPCE500MC:
5115 rs6000_cost = &ppce500mc_cost;
5116 break;
5118 case PROCESSOR_PPCE500MC64:
5119 rs6000_cost = &ppce500mc64_cost;
5120 break;
5122 case PROCESSOR_PPCE5500:
5123 rs6000_cost = &ppce5500_cost;
5124 break;
5126 case PROCESSOR_PPCE6500:
5127 rs6000_cost = &ppce6500_cost;
5128 break;
5130 case PROCESSOR_TITAN:
5131 rs6000_cost = &titan_cost;
5132 break;
5134 case PROCESSOR_POWER4:
5135 case PROCESSOR_POWER5:
5136 rs6000_cost = &power4_cost;
5137 break;
5139 case PROCESSOR_POWER6:
5140 rs6000_cost = &power6_cost;
5141 break;
5143 case PROCESSOR_POWER7:
5144 rs6000_cost = &power7_cost;
5145 break;
5147 case PROCESSOR_POWER8:
5148 rs6000_cost = &power8_cost;
5149 break;
5151 case PROCESSOR_POWER9:
5152 rs6000_cost = &power9_cost;
5153 break;
5155 case PROCESSOR_PPCA2:
5156 rs6000_cost = &ppca2_cost;
5157 break;
5159 default:
5160 gcc_unreachable ();
5163 if (global_init_p)
5165 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5166 rs6000_cost->simultaneous_prefetches,
5167 global_options.x_param_values,
5168 global_options_set.x_param_values);
5169 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5170 global_options.x_param_values,
5171 global_options_set.x_param_values);
5172 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5173 rs6000_cost->cache_line_size,
5174 global_options.x_param_values,
5175 global_options_set.x_param_values);
5176 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5177 global_options.x_param_values,
5178 global_options_set.x_param_values);
5180 /* Increase loop peeling limits based on performance analysis. */
5181 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5182 global_options.x_param_values,
5183 global_options_set.x_param_values);
5184 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5185 global_options.x_param_values,
5186 global_options_set.x_param_values);
5188 /* Use the 'model' -fsched-pressure algorithm by default. */
5189 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5190 SCHED_PRESSURE_MODEL,
5191 global_options.x_param_values,
5192 global_options_set.x_param_values);
5194 /* If using typedef char *va_list, signal that
5195 __builtin_va_start (&ap, 0) can be optimized to
5196 ap = __builtin_next_arg (0). */
5197 if (DEFAULT_ABI != ABI_V4)
5198 targetm.expand_builtin_va_start = NULL;
5201 /* Set up single/double float flags.
5202 If TARGET_HARD_FLOAT is set, but neither single or double is set,
5203 then set both flags. */
5204 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5205 rs6000_single_float = rs6000_double_float = 1;
5207 /* If not explicitly specified via option, decide whether to generate indexed
5208 load/store instructions. A value of -1 indicates that the
5209 initial value of this variable has not been overwritten. During
5210 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5211 if (TARGET_AVOID_XFORM == -1)
5212 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5213 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5214 need indexed accesses and the type used is the scalar type of the element
5215 being loaded or stored. */
5216 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
5217 && !TARGET_ALTIVEC);
5219 /* Set the -mrecip options. */
5220 if (rs6000_recip_name)
5222 char *p = ASTRDUP (rs6000_recip_name);
5223 char *q;
5224 unsigned int mask, i;
5225 bool invert;
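5226 /* E.g. (illustrative): -mrecip=all,!rsqrtd enables every estimate, then the leading '!' clears the rsqrtd bit. */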
5227 while ((q = strtok (p, ",")) != NULL)
5229 p = NULL;
5230 if (*q == '!')
5232 invert = true;
5233 q++;
5235 else
5236 invert = false;
5238 if (!strcmp (q, "default"))
5239 mask = ((TARGET_RECIP_PRECISION)
5240 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5241 else
5243 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5244 if (!strcmp (q, recip_options[i].string))
5246 mask = recip_options[i].mask;
5247 break;
5250 if (i == ARRAY_SIZE (recip_options))
5252 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5253 invert = false;
5254 mask = 0;
5255 ret = false;
5259 if (invert)
5260 rs6000_recip_control &= ~mask;
5261 else
5262 rs6000_recip_control |= mask;
5266 /* Set the builtin mask of the various options used that could affect which
5267 builtins were used. In the past we used target_flags, but we've run out
5268 of bits, and some options like PAIRED are no longer in target_flags. */
5269 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5270 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5271 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5272 rs6000_builtin_mask);
5274 /* Initialize all of the registers. */
5275 rs6000_init_hard_regno_mode_ok (global_init_p);
5277 /* Save the initial options in case the user does function specific options. */
5278 if (global_init_p)
5279 target_option_default_node = target_option_current_node
5280 = build_target_option_node (&global_options);
5282 /* If not explicitly specified via option, decide whether to generate the
5283 extra blr's required to preserve the link stack on some cpus (eg, 476). */
5284 if (TARGET_LINK_STACK == -1)
5285 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
5287 /* Deprecate use of -mno-speculate-indirect-jumps. */
5288 if (!rs6000_speculate_indirect_jumps)
5289 warning (0, "%qs is deprecated and not recommended in any circumstances",
5290 "-mno-speculate-indirect-jumps");
5292 return ret;
5295 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5296 define the target cpu type. */
5298 static void
5299 rs6000_option_override (void)
5301 (void) rs6000_option_override_internal (true);
5305 /* Implement targetm.vectorize.builtin_mask_for_load. */
5306 static tree
5307 rs6000_builtin_mask_for_load (void)
5309 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5310 if ((TARGET_ALTIVEC && !TARGET_VSX)
5311 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5312 return altivec_builtin_mask_for_load;
5313 else
5314 return 0;
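5315 /* The returned mask drives the vectorizer's realignment scheme: lvsl builds a
5316    permute control vector from the misaligned address and vperm merges two aligned loads. */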
5317 /* Implement LOOP_ALIGN. */
5318 int
5319 rs6000_loop_align (rtx label)
5321 basic_block bb;
5322 int ninsns;
5324 /* Don't override loop alignment if -falign-loops was specified. */
5325 if (!can_override_loop_align)
5326 return align_loops_log;
5328 bb = BLOCK_FOR_INSN (label);
5329 ninsns = num_loop_insns (bb->loop_father);
5331 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5332 if (ninsns > 4 && ninsns <= 8
5333 && (rs6000_tune == PROCESSOR_POWER4
5334 || rs6000_tune == PROCESSOR_POWER5
5335 || rs6000_tune == PROCESSOR_POWER6
5336 || rs6000_tune == PROCESSOR_POWER7
5337 || rs6000_tune == PROCESSOR_POWER8))
5338 return 5;
5339 else
5340 return align_loops_log;
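5341 /* The return value is a log2 alignment; e.g., the 5 above requests 2^5 = 32-byte
5342    alignment for loops of 5 to 8 instructions on the listed processors. */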
5343 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5344 static int
5345 rs6000_loop_align_max_skip (rtx_insn *label)
5347 return (1 << rs6000_loop_align (label)) - 1;
5350 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5351 after applying N iterations. This routine does not determine
5352 how many iterations are required to reach the desired alignment. */
5354 static bool
5355 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5357 if (is_packed)
5358 return false;
5360 if (TARGET_32BIT)
5362 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5363 return true;
5365 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5366 return true;
5368 return false;
5370 else
5372 if (TARGET_MACHO)
5373 return false;
5375 /* Assuming that all other types are naturally aligned. CHECKME! */
5376 return true;
5380 /* Return true if the vector misalignment factor is supported by the
5381 target. */
5382 static bool
5383 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5384 const_tree type,
5385 int misalignment,
5386 bool is_packed)
5388 if (TARGET_VSX)
5390 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5391 return true;
5393 /* Return false if the movmisalign pattern is not supported for this mode. */
5394 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5395 return false;
5397 if (misalignment == -1)
5399 /* Misalignment factor is unknown at compile time but we know
5400 it's word aligned. */
5401 if (rs6000_vector_alignment_reachable (type, is_packed))
5403 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5405 if (element_size == 64 || element_size == 32)
5406 return true;
5409 return false;
5412 /* VSX supports word-aligned vectors. */
5413 if (misalignment % 4 == 0)
5414 return true;
5416 return false;
5419 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5420 static int
5421 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5422 tree vectype, int misalign)
5424 unsigned elements;
5425 tree elem_type;
5427 switch (type_of_cost)
5429 case scalar_stmt:
5430 case scalar_load:
5431 case scalar_store:
5432 case vector_stmt:
5433 case vector_load:
5434 case vector_store:
5435 case vec_to_scalar:
5436 case scalar_to_vec:
5437 case cond_branch_not_taken:
5438 return 1;
5440 case vec_perm:
5441 if (TARGET_VSX)
5442 return 3;
5443 else
5444 return 1;
5446 case vec_promote_demote:
5447 if (TARGET_VSX)
5448 return 4;
5449 else
5450 return 1;
5452 case cond_branch_taken:
5453 return 3;
5455 case unaligned_load:
5456 case vector_gather_load:
5457 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5458 return 1;
5460 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5462 elements = TYPE_VECTOR_SUBPARTS (vectype);
5463 if (elements == 2)
5464 /* Double word aligned. */
5465 return 2;
5467 if (elements == 4)
5469 switch (misalign)
5471 case 8:
5472 /* Double word aligned. */
5473 return 2;
5475 case -1:
5476 /* Unknown misalignment. */
5477 case 4:
5478 case 12:
5479 /* Word aligned. */
5480 return 22;
5482 default:
5483 gcc_unreachable ();
5488 if (TARGET_ALTIVEC)
5489 /* Misaligned loads are not supported. */
5490 gcc_unreachable ();
5492 return 2;
5494 case unaligned_store:
5495 case vector_scatter_store:
5496 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5497 return 1;
5499 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5501 elements = TYPE_VECTOR_SUBPARTS (vectype);
5502 if (elements == 2)
5503 /* Double word aligned. */
5504 return 2;
5506 if (elements == 4)
5508 switch (misalign)
5510 case 8:
5511 /* Double word aligned. */
5512 return 2;
5514 case -1:
5515 /* Unknown misalignment. */
5516 case 4:
5517 case 12:
5518 /* Word aligned. */
5519 return 23;
5521 default:
5522 gcc_unreachable ();
5527 if (TARGET_ALTIVEC)
5528 /* Misaligned stores are not supported. */
5529 gcc_unreachable ();
5531 return 2;
5533 case vec_construct:
5534 /* This is a rough approximation assuming non-constant elements
5535 constructed into a vector via element insertion. FIXME:
5536 vec_construct is not granular enough for uniformly good
5537 decisions. If the initialization is a splat, this is
5538 cheaper than we estimate. Improve this someday. */
5539 elem_type = TREE_TYPE (vectype);
5540 /* 32-bit vectors loaded into registers are stored as double
5541 precision, so we need 2 permutes, 2 converts, and 1 merge
5542 to construct a vector of short floats from them. */
5543 if (SCALAR_FLOAT_TYPE_P (elem_type)
5544 && TYPE_PRECISION (elem_type) == 32)
5545 return 5;
5546 /* On POWER9, integer vector types are built up in GPRs and then
5547 use a direct move (2 cycles). For POWER8 this is even worse,
5548 as we need two direct moves and a merge, and the direct moves
5549 are five cycles. */
5550 else if (INTEGRAL_TYPE_P (elem_type))
5552 if (TARGET_P9_VECTOR)
5553 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5554 else
5555 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5557 else
5558 /* V2DFmode doesn't need a direct move. */
5559 return 2;
5561 default:
5562 gcc_unreachable ();
5566 /* Implement targetm.vectorize.preferred_simd_mode. */
5568 static machine_mode
5569 rs6000_preferred_simd_mode (scalar_mode mode)
5571 if (TARGET_VSX)
5572 switch (mode)
5574 case E_DFmode:
5575 return V2DFmode;
5576 default:;
5578 if (TARGET_ALTIVEC || TARGET_VSX)
5579 switch (mode)
5581 case E_SFmode:
5582 return V4SFmode;
5583 case E_TImode:
5584 return V1TImode;
5585 case E_DImode:
5586 return V2DImode;
5587 case E_SImode:
5588 return V4SImode;
5589 case E_HImode:
5590 return V8HImode;
5591 case E_QImode:
5592 return V16QImode;
5593 default:;
5595 if (TARGET_PAIRED_FLOAT
5596 && mode == SFmode)
5597 return V2SFmode;
5598 return word_mode;
5601 typedef struct _rs6000_cost_data
5603 struct loop *loop_info;
5604 unsigned cost[3];
5605 } rs6000_cost_data;
5607 /* Test for likely overcommitment of vector hardware resources. If a
5608 loop iteration is relatively large, and too large a percentage of
5609 instructions in the loop are vectorized, the cost model may not
5610 adequately reflect delays from unavailable vector resources.
5611 Penalize the loop body cost for this case. */
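5612 /* E.g., vec_cost 90 and not_vec_cost 5 give density 94% and size 95, so the body cost is scaled by 110% (90 becomes 99). */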
5613 static void
5614 rs6000_density_test (rs6000_cost_data *data)
5616 const int DENSITY_PCT_THRESHOLD = 85;
5617 const int DENSITY_SIZE_THRESHOLD = 70;
5618 const int DENSITY_PENALTY = 10;
5619 struct loop *loop = data->loop_info;
5620 basic_block *bbs = get_loop_body (loop);
5621 int nbbs = loop->num_nodes;
5622 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5623 int i, density_pct;
5625 for (i = 0; i < nbbs; i++)
5627 basic_block bb = bbs[i];
5628 gimple_stmt_iterator gsi;
5630 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5632 gimple *stmt = gsi_stmt (gsi);
5633 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5635 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5636 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5637 not_vec_cost++;
5641 free (bbs);
5642 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5644 if (density_pct > DENSITY_PCT_THRESHOLD
5645 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5647 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5648 if (dump_enabled_p ())
5649 dump_printf_loc (MSG_NOTE, vect_location,
5650 "density %d%%, cost %d exceeds threshold, penalizing "
5651 "loop body cost by %d%%", density_pct,
5652 vec_cost + not_vec_cost, DENSITY_PENALTY);
5656 /* Implement targetm.vectorize.init_cost. */
5658 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5659 instruction is needed by the vectorization. */
5660 static bool rs6000_vect_nonmem;
5662 static void *
5663 rs6000_init_cost (struct loop *loop_info)
5665 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5666 data->loop_info = loop_info;
5667 data->cost[vect_prologue] = 0;
5668 data->cost[vect_body] = 0;
5669 data->cost[vect_epilogue] = 0;
5670 rs6000_vect_nonmem = false;
5671 return data;
5674 /* Implement targetm.vectorize.add_stmt_cost. */
5676 static unsigned
5677 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5678 struct _stmt_vec_info *stmt_info, int misalign,
5679 enum vect_cost_model_location where)
5681 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5682 unsigned retval = 0;
5684 if (flag_vect_cost_model)
5686 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5687 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5688 misalign);
5689 /* Statements in an inner loop relative to the loop being
5690 vectorized are weighted more heavily. The value here is
5691 arbitrary and could potentially be improved with analysis. */
5692 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5693 count *= 50; /* FIXME. */
5695 retval = (unsigned) (count * stmt_cost);
5696 cost_data->cost[where] += retval;
5698 /* Check whether we're doing something other than just a copy loop.
5699 Not all such loops may be profitably vectorized; see
5700 rs6000_finish_cost. */
5701 if ((kind == vec_to_scalar || kind == vec_perm
5702 || kind == vec_promote_demote || kind == vec_construct
5703 || kind == scalar_to_vec)
5704 || (where == vect_body && kind == vector_stmt))
5705 rs6000_vect_nonmem = true;
5708 return retval;
5711 /* Implement targetm.vectorize.finish_cost. */
5713 static void
5714 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5715 unsigned *body_cost, unsigned *epilogue_cost)
5717 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5719 if (cost_data->loop_info)
5720 rs6000_density_test (cost_data);
5722 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5723 that require versioning for any reason. The vectorization is at
5724 best a wash inside the loop, and the versioning checks make
5725 profitability highly unlikely and potentially quite harmful. */
5726 if (cost_data->loop_info)
5728 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5729 if (!rs6000_vect_nonmem
5730 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5731 && LOOP_REQUIRES_VERSIONING (vec_info))
5732 cost_data->cost[vect_body] += 10000;
5735 *prologue_cost = cost_data->cost[vect_prologue];
5736 *body_cost = cost_data->cost[vect_body];
5737 *epilogue_cost = cost_data->cost[vect_epilogue];
5740 /* Implement targetm.vectorize.destroy_cost_data. */
5742 static void
5743 rs6000_destroy_cost_data (void *data)
5745 free (data);
5748 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5749 library with vectorized intrinsics. */
5751 static tree
5752 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5753 tree type_in)
5755 char name[32];
5756 const char *suffix = NULL;
5757 tree fntype, new_fndecl, bdecl = NULL_TREE;
5758 int n_args = 1;
5759 const char *bname;
5760 machine_mode el_mode, in_mode;
5761 int n, in_n;
5763 /* Libmass is suitable for unsafe math only as it does not correctly support
5764 parts of IEEE with the required precision such as denormals. Only support
5765 it if we have VSX to use the simd d2 or f4 functions.
5766 XXX: Add variable length support. */
5767 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5768 return NULL_TREE;
5770 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5771 n = TYPE_VECTOR_SUBPARTS (type_out);
5772 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5773 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5774 if (el_mode != in_mode
5775 || n != in_n)
5776 return NULL_TREE;
5778 switch (fn)
5780 CASE_CFN_ATAN2:
5781 CASE_CFN_HYPOT:
5782 CASE_CFN_POW:
5783 n_args = 2;
5784 gcc_fallthrough ();
5786 CASE_CFN_ACOS:
5787 CASE_CFN_ACOSH:
5788 CASE_CFN_ASIN:
5789 CASE_CFN_ASINH:
5790 CASE_CFN_ATAN:
5791 CASE_CFN_ATANH:
5792 CASE_CFN_CBRT:
5793 CASE_CFN_COS:
5794 CASE_CFN_COSH:
5795 CASE_CFN_ERF:
5796 CASE_CFN_ERFC:
5797 CASE_CFN_EXP2:
5798 CASE_CFN_EXP:
5799 CASE_CFN_EXPM1:
5800 CASE_CFN_LGAMMA:
5801 CASE_CFN_LOG10:
5802 CASE_CFN_LOG1P:
5803 CASE_CFN_LOG2:
5804 CASE_CFN_LOG:
5805 CASE_CFN_SIN:
5806 CASE_CFN_SINH:
5807 CASE_CFN_SQRT:
5808 CASE_CFN_TAN:
5809 CASE_CFN_TANH:
5810 if (el_mode == DFmode && n == 2)
5812 bdecl = mathfn_built_in (double_type_node, fn);
5813 suffix = "d2"; /* pow -> powd2 */
5815 else if (el_mode == SFmode && n == 4)
5817 bdecl = mathfn_built_in (float_type_node, fn);
5818 suffix = "4"; /* powf -> powf4 */
5820 else
5821 return NULL_TREE;
5822 if (!bdecl)
5823 return NULL_TREE;
5824 break;
5826 default:
5827 return NULL_TREE;
5830 gcc_assert (suffix != NULL);
5831 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5832 if (!bname)
5833 return NULL_TREE;
5835 strcpy (name, bname + sizeof ("__builtin_") - 1);
5836 strcat (name, suffix);
5838 if (n_args == 1)
5839 fntype = build_function_type_list (type_out, type_in, NULL);
5840 else if (n_args == 2)
5841 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5842 else
5843 gcc_unreachable ();
5845 /* Build a function declaration for the vectorized function. */
5846 new_fndecl = build_decl (BUILTINS_LOCATION,
5847 FUNCTION_DECL, get_identifier (name), fntype);
5848 TREE_PUBLIC (new_fndecl) = 1;
5849 DECL_EXTERNAL (new_fndecl) = 1;
5850 DECL_IS_NOVOPS (new_fndecl) = 1;
5851 TREE_READONLY (new_fndecl) = 1;
5853 return new_fndecl;
5856 /* Returns a function decl for a vectorized version of the builtin function
5857 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5858 if it is not available. */
5860 static tree
5861 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5862 tree type_in)
5864 machine_mode in_mode, out_mode;
5865 int in_n, out_n;
5867 if (TARGET_DEBUG_BUILTIN)
5868 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5869 combined_fn_name (combined_fn (fn)),
5870 GET_MODE_NAME (TYPE_MODE (type_out)),
5871 GET_MODE_NAME (TYPE_MODE (type_in)));
5873 if (TREE_CODE (type_out) != VECTOR_TYPE
5874 || TREE_CODE (type_in) != VECTOR_TYPE)
5875 return NULL_TREE;
5877 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5878 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5879 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5880 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5882 switch (fn)
5884 CASE_CFN_COPYSIGN:
5885 if (VECTOR_UNIT_VSX_P (V2DFmode)
5886 && out_mode == DFmode && out_n == 2
5887 && in_mode == DFmode && in_n == 2)
5888 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5889 if (VECTOR_UNIT_VSX_P (V4SFmode)
5890 && out_mode == SFmode && out_n == 4
5891 && in_mode == SFmode && in_n == 4)
5892 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5893 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5894 && out_mode == SFmode && out_n == 4
5895 && in_mode == SFmode && in_n == 4)
5896 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5897 break;
5898 CASE_CFN_CEIL:
5899 if (VECTOR_UNIT_VSX_P (V2DFmode)
5900 && out_mode == DFmode && out_n == 2
5901 && in_mode == DFmode && in_n == 2)
5902 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5903 if (VECTOR_UNIT_VSX_P (V4SFmode)
5904 && out_mode == SFmode && out_n == 4
5905 && in_mode == SFmode && in_n == 4)
5906 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5907 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5908 && out_mode == SFmode && out_n == 4
5909 && in_mode == SFmode && in_n == 4)
5910 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5911 break;
5912 CASE_CFN_FLOOR:
5913 if (VECTOR_UNIT_VSX_P (V2DFmode)
5914 && out_mode == DFmode && out_n == 2
5915 && in_mode == DFmode && in_n == 2)
5916 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5917 if (VECTOR_UNIT_VSX_P (V4SFmode)
5918 && out_mode == SFmode && out_n == 4
5919 && in_mode == SFmode && in_n == 4)
5920 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5921 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5922 && out_mode == SFmode && out_n == 4
5923 && in_mode == SFmode && in_n == 4)
5924 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5925 break;
5926 CASE_CFN_FMA:
5927 if (VECTOR_UNIT_VSX_P (V2DFmode)
5928 && out_mode == DFmode && out_n == 2
5929 && in_mode == DFmode && in_n == 2)
5930 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5931 if (VECTOR_UNIT_VSX_P (V4SFmode)
5932 && out_mode == SFmode && out_n == 4
5933 && in_mode == SFmode && in_n == 4)
5934 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5935 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5936 && out_mode == SFmode && out_n == 4
5937 && in_mode == SFmode && in_n == 4)
5938 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5939 break;
5940 CASE_CFN_TRUNC:
5941 if (VECTOR_UNIT_VSX_P (V2DFmode)
5942 && out_mode == DFmode && out_n == 2
5943 && in_mode == DFmode && in_n == 2)
5944 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5945 if (VECTOR_UNIT_VSX_P (V4SFmode)
5946 && out_mode == SFmode && out_n == 4
5947 && in_mode == SFmode && in_n == 4)
5948 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5949 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5950 && out_mode == SFmode && out_n == 4
5951 && in_mode == SFmode && in_n == 4)
5952 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5953 break;
5954 CASE_CFN_NEARBYINT:
5955 if (VECTOR_UNIT_VSX_P (V2DFmode)
5956 && flag_unsafe_math_optimizations
5957 && out_mode == DFmode && out_n == 2
5958 && in_mode == DFmode && in_n == 2)
5959 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5960 if (VECTOR_UNIT_VSX_P (V4SFmode)
5961 && flag_unsafe_math_optimizations
5962 && out_mode == SFmode && out_n == 4
5963 && in_mode == SFmode && in_n == 4)
5964 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5965 break;
5966 CASE_CFN_RINT:
5967 if (VECTOR_UNIT_VSX_P (V2DFmode)
5968 && !flag_trapping_math
5969 && out_mode == DFmode && out_n == 2
5970 && in_mode == DFmode && in_n == 2)
5971 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5972 if (VECTOR_UNIT_VSX_P (V4SFmode)
5973 && !flag_trapping_math
5974 && out_mode == SFmode && out_n == 4
5975 && in_mode == SFmode && in_n == 4)
5976 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5977 break;
5978 default:
5979 break;
5982 /* Generate calls to libmass if appropriate. */
5983 if (rs6000_veclib_handler)
5984 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5986 return NULL_TREE;
5989 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5991 static tree
5992 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5993 tree type_in)
5995 machine_mode in_mode, out_mode;
5996 int in_n, out_n;
5998 if (TARGET_DEBUG_BUILTIN)
5999 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6000 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6001 GET_MODE_NAME (TYPE_MODE (type_out)),
6002 GET_MODE_NAME (TYPE_MODE (type_in)));
6004 if (TREE_CODE (type_out) != VECTOR_TYPE
6005 || TREE_CODE (type_in) != VECTOR_TYPE)
6006 return NULL_TREE;
6008 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6009 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6010 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6011 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6013 enum rs6000_builtins fn
6014 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6015 switch (fn)
6017 case RS6000_BUILTIN_RSQRTF:
6018 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6019 && out_mode == SFmode && out_n == 4
6020 && in_mode == SFmode && in_n == 4)
6021 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6022 break;
6023 case RS6000_BUILTIN_RSQRT:
6024 if (VECTOR_UNIT_VSX_P (V2DFmode)
6025 && out_mode == DFmode && out_n == 2
6026 && in_mode == DFmode && in_n == 2)
6027 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6028 break;
6029 case RS6000_BUILTIN_RECIPF:
6030 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6031 && out_mode == SFmode && out_n == 4
6032 && in_mode == SFmode && in_n == 4)
6033 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6034 break;
6035 case RS6000_BUILTIN_RECIP:
6036 if (VECTOR_UNIT_VSX_P (V2DFmode)
6037 && out_mode == DFmode && out_n == 2
6038 && in_mode == DFmode && in_n == 2)
6039 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6040 break;
6041 default:
6042 break;
6044 return NULL_TREE;
6047 /* Default CPU string for rs6000*_file_start functions. */
6048 static const char *rs6000_default_cpu;
6050 /* Do anything needed at the start of the asm file. */
6052 static void
6053 rs6000_file_start (void)
6055 char buffer[80];
6056 const char *start = buffer;
6057 FILE *file = asm_out_file;
6059 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6061 default_file_start ();
6063 if (flag_verbose_asm)
6065 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6067 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6069 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6070 start = "";
6073 if (global_options_set.x_rs6000_cpu_index)
6075 fprintf (file, "%s -mcpu=%s", start,
6076 processor_target_table[rs6000_cpu_index].name);
6077 start = "";
6080 if (global_options_set.x_rs6000_tune_index)
6082 fprintf (file, "%s -mtune=%s", start,
6083 processor_target_table[rs6000_tune_index].name);
6084 start = "";
6087 if (PPC405_ERRATUM77)
6089 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6090 start = "";
6093 #ifdef USING_ELFOS_H
6094 switch (rs6000_sdata)
6096 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6097 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6098 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6099 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6102 if (rs6000_sdata && g_switch_value)
6104 fprintf (file, "%s -G %d", start,
6105 g_switch_value);
6106 start = "";
6108 #endif
6110 if (*start == '\0')
6111 putc ('\n', file);
6114 #ifdef USING_ELFOS_H
6115 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6116 && !global_options_set.x_rs6000_cpu_index)
6118 fputs ("\t.machine ", asm_out_file);
6119 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6120 fputs ("power9\n", asm_out_file);
6121 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6122 fputs ("power8\n", asm_out_file);
6123 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6124 fputs ("power7\n", asm_out_file);
6125 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6126 fputs ("power6\n", asm_out_file);
6127 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6128 fputs ("power5\n", asm_out_file);
6129 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6130 fputs ("power4\n", asm_out_file);
6131 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6132 fputs ("ppc64\n", asm_out_file);
6133 else
6134 fputs ("ppc\n", asm_out_file);
6136 #endif
6138 if (DEFAULT_ABI == ABI_ELFv2)
6139 fprintf (file, "\t.abiversion 2\n");
6143 /* Return nonzero if this function is known to have a null epilogue. */
6145 int
6146 direct_return (void)
6148 if (reload_completed)
6150 rs6000_stack_t *info = rs6000_stack_info ();
6152 if (info->first_gp_reg_save == 32
6153 && info->first_fp_reg_save == 64
6154 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6155 && ! info->lr_save_p
6156 && ! info->cr_save_p
6157 && info->vrsave_size == 0
6158 && ! info->push_p)
6159 return 1;
6162 return 0;
6165 /* Return the number of instructions it takes to form a constant in an
6166 integer register. */
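6167 /* E.g., 0x12345678 takes lis+ori (2 insns), while a full 64-bit constant such as 0x1234567812345678 takes 5. */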
6168 static int
6169 num_insns_constant_wide (HOST_WIDE_INT value)
6171 /* signed constant loadable with addi */
6172 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6173 return 1;
6175 /* constant loadable with addis */
6176 else if ((value & 0xffff) == 0
6177 && (value >> 31 == -1 || value >> 31 == 0))
6178 return 1;
6180 else if (TARGET_POWERPC64)
6182 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6183 HOST_WIDE_INT high = value >> 31;
6185 if (high == 0 || high == -1)
6186 return 2;
6188 high >>= 1;
6190 if (low == 0)
6191 return num_insns_constant_wide (high) + 1;
6192 else if (high == 0)
6193 return num_insns_constant_wide (low) + 1;
6194 else
6195 return (num_insns_constant_wide (high)
6196 + num_insns_constant_wide (low) + 1);
6199 else
6200 return 2;
6203 int
6204 num_insns_constant (rtx op, machine_mode mode)
6206 HOST_WIDE_INT low, high;
6208 switch (GET_CODE (op))
6210 case CONST_INT:
6211 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6212 && rs6000_is_valid_and_mask (op, mode))
6213 return 2;
6214 else
6215 return num_insns_constant_wide (INTVAL (op));
6217 case CONST_WIDE_INT:
6219 int i;
6220 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6221 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6222 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6223 return ins;
6226 case CONST_DOUBLE:
6227 if (mode == SFmode || mode == SDmode)
6229 long l;
6231 if (DECIMAL_FLOAT_MODE_P (mode))
6232 REAL_VALUE_TO_TARGET_DECIMAL32
6233 (*CONST_DOUBLE_REAL_VALUE (op), l);
6234 else
6235 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6236 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6239 long l[2];
6240 if (DECIMAL_FLOAT_MODE_P (mode))
6241 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6242 else
6243 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6244 high = l[WORDS_BIG_ENDIAN == 0];
6245 low = l[WORDS_BIG_ENDIAN != 0];
6247 if (TARGET_32BIT)
6248 return (num_insns_constant_wide (low)
6249 + num_insns_constant_wide (high));
6250 else
6252 if ((high == 0 && low >= 0)
6253 || (high == -1 && low < 0))
6254 return num_insns_constant_wide (low);
6256 else if (rs6000_is_valid_and_mask (op, mode))
6257 return 2;
6259 else if (low == 0)
6260 return num_insns_constant_wide (high) + 1;
6262 else
6263 return (num_insns_constant_wide (high)
6264 + num_insns_constant_wide (low) + 1);
6267 default:
6268 gcc_unreachable ();
6272 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6273 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6274 corresponding element of the vector, but for V4SFmode and V2SFmode,
6275 the corresponding "float" is interpreted as an SImode integer. */
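6276 /* E.g., the V4SFmode element 1.0f reads back as 0x3f800000. */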
6277 HOST_WIDE_INT
6278 const_vector_elt_as_int (rtx op, unsigned int elt)
6280 rtx tmp;
6282 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6283 gcc_assert (GET_MODE (op) != V2DImode
6284 && GET_MODE (op) != V2DFmode);
6286 tmp = CONST_VECTOR_ELT (op, elt);
6287 if (GET_MODE (op) == V4SFmode
6288 || GET_MODE (op) == V2SFmode)
6289 tmp = gen_lowpart (SImode, tmp);
6290 return INTVAL (tmp);
6293 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6294 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6295 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6296 all items are set to the same value and contain COPIES replicas of the
6297 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6298 operand and the others are set to the value of the operand's msb. */
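6299 /* E.g., a V8HImode vector of eight 5s is vspltish 5 (STEP 1, COPIES 1); the V4SImode vector of four 0x00050005s matches the same vspltish with COPIES 2. */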
6300 static bool
6301 vspltis_constant (rtx op, unsigned step, unsigned copies)
6303 machine_mode mode = GET_MODE (op);
6304 machine_mode inner = GET_MODE_INNER (mode);
6306 unsigned i;
6307 unsigned nunits;
6308 unsigned bitsize;
6309 unsigned mask;
6311 HOST_WIDE_INT val;
6312 HOST_WIDE_INT splat_val;
6313 HOST_WIDE_INT msb_val;
6315 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6316 return false;
6318 nunits = GET_MODE_NUNITS (mode);
6319 bitsize = GET_MODE_BITSIZE (inner);
6320 mask = GET_MODE_MASK (inner);
6322 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6323 splat_val = val;
6324 msb_val = val >= 0 ? 0 : -1;
6326 /* Construct the value to be splatted, if possible. If not, return 0. */
6327 for (i = 2; i <= copies; i *= 2)
6329 HOST_WIDE_INT small_val;
6330 bitsize /= 2;
6331 small_val = splat_val >> bitsize;
6332 mask >>= bitsize;
6333 if (splat_val != ((HOST_WIDE_INT)
6334 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6335 | (small_val & mask)))
6336 return false;
6337 splat_val = small_val;
6340 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6341 if (EASY_VECTOR_15 (splat_val))
6344 /* Also check if we can splat, and then add the result to itself. Do so if
6345 the value is positive, or if the splat instruction is using OP's mode;
6346 for splat_val < 0, the splat and the add should use the same mode. */
6347 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6348 && (splat_val >= 0 || (step == 1 && copies == 1)))
6351 /* Also check if we are loading up the most significant bit which can be done by
6352 loading up -1 and shifting the value left by -1. */
6353 else if (EASY_VECTOR_MSB (splat_val, inner))
6356 else
6357 return false;
6359 /* Check if VAL is present in every STEP-th element, and the
6360 other elements are filled with its most significant bit. */
6361 for (i = 1; i < nunits; ++i)
6363 HOST_WIDE_INT desired_val;
6364 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6365 if ((i & (step - 1)) == 0)
6366 desired_val = val;
6367 else
6368 desired_val = msb_val;
6370 if (desired_val != const_vector_elt_as_int (op, elt))
6371 return false;
6374 return true;
6377 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6378 instruction, filling in the bottom elements with 0 or -1.
6380 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6381 for the number of zeroes to shift in, or negative for the number of 0xff
6382 bytes to shift in.
6384 OP is a CONST_VECTOR. */
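6385 /* E.g., big-endian V8HImode {5, 5, 5, 0, 0, 0, 0, 0} is vspltish 5 followed by a VSLDOI that shifts in 10 zero bytes. */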
6386 int
6387 vspltis_shifted (rtx op)
6389 machine_mode mode = GET_MODE (op);
6390 machine_mode inner = GET_MODE_INNER (mode);
6392 unsigned i, j;
6393 unsigned nunits;
6394 unsigned mask;
6396 HOST_WIDE_INT val;
6398 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6399 return false;
6401 /* We need to create pseudo registers to do the shift, so don't recognize
6402 shift vector constants after reload. */
6403 if (!can_create_pseudo_p ())
6404 return false;
6406 nunits = GET_MODE_NUNITS (mode);
6407 mask = GET_MODE_MASK (inner);
6409 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6411 /* Check if the value can really be the operand of a vspltis[bhw]. */
6412 if (EASY_VECTOR_15 (val))
6415 /* Also check if we are loading up the most significant bit which can be done
6416 by loading up -1 and shifting the value left by -1. */
6417 else if (EASY_VECTOR_MSB (val, inner))
6420 else
6421 return 0;
6423 /* Check if VAL is present in every element until we find elements
6424 that are 0 or all 1 bits. */
6425 for (i = 1; i < nunits; ++i)
6427 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6428 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6430 /* If the value isn't the splat value, check for the remaining elements
6431 being 0/-1. */
6432 if (val != elt_val)
6434 if (elt_val == 0)
6436 for (j = i+1; j < nunits; ++j)
6438 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6439 if (const_vector_elt_as_int (op, elt2) != 0)
6440 return 0;
6443 return (nunits - i) * GET_MODE_SIZE (inner);
6446 else if ((elt_val & mask) == mask)
6448 for (j = i+1; j < nunits; ++j)
6450 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6451 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6452 return 0;
6455 return -((nunits - i) * GET_MODE_SIZE (inner));
6458 else
6459 return 0;
6463 /* If all elements are equal, we don't need to do VSLDOI. */
6464 return 0;
6468 /* Return true if OP is of the given MODE and can be synthesized
6469 with a vspltisb, vspltish or vspltisw. */
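6470 /* E.g., a V16QImode vector of sixteen 3s is a single vspltisb 3; sixteen 24s work as vspltisb 12 added to itself (EASY_VECTOR_15_ADD_SELF). */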
6471 bool
6472 easy_altivec_constant (rtx op, machine_mode mode)
6474 unsigned step, copies;
6476 if (mode == VOIDmode)
6477 mode = GET_MODE (op);
6478 else if (mode != GET_MODE (op))
6479 return false;
6481 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6482 constants. */
6483 if (mode == V2DFmode)
6484 return zero_constant (op, mode);
6486 else if (mode == V2DImode)
6488 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6489 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6490 return false;
6492 if (zero_constant (op, mode))
6493 return true;
6495 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6496 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6497 return true;
6499 return false;
6502 /* V1TImode is a special container for TImode. Ignore for now. */
6503 else if (mode == V1TImode)
6504 return false;
6506 /* Start with a vspltisw. */
6507 step = GET_MODE_NUNITS (mode) / 4;
6508 copies = 1;
6510 if (vspltis_constant (op, step, copies))
6511 return true;
6513 /* Then try with a vspltish. */
6514 if (step == 1)
6515 copies <<= 1;
6516 else
6517 step >>= 1;
6519 if (vspltis_constant (op, step, copies))
6520 return true;
6522 /* And finally a vspltisb. */
6523 if (step == 1)
6524 copies <<= 1;
6525 else
6526 step >>= 1;
6528 if (vspltis_constant (op, step, copies))
6529 return true;
6531 if (vspltis_shifted (op) != 0)
6532 return true;
6534 return false;
6537 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6538 result is OP. Abort if it is not possible. */
6540 rtx
6541 gen_easy_altivec_constant (rtx op)
6543 machine_mode mode = GET_MODE (op);
6544 int nunits = GET_MODE_NUNITS (mode);
6545 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6546 unsigned step = nunits / 4;
6547 unsigned copies = 1;
6549 /* Start with a vspltisw. */
6550 if (vspltis_constant (op, step, copies))
6551 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6553 /* Then try with a vspltish. */
6554 if (step == 1)
6555 copies <<= 1;
6556 else
6557 step >>= 1;
6559 if (vspltis_constant (op, step, copies))
6560 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6562 /* And finally a vspltisb. */
6563 if (step == 1)
6564 copies <<= 1;
6565 else
6566 step >>= 1;
6568 if (vspltis_constant (op, step, copies))
6569 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6571 gcc_unreachable ();
6574 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6575 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6577 Return the number of instructions needed (1 or 2) into the address pointed
6578 via NUM_INSNS_PTR.
6580 Return the constant that is being split via CONSTANT_PTR. */
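6581 /* E.g., splatting 33 across V8HImode takes two insns (xxspltib 33, then a sign extend such as vupkhsb), whereas 5 is rejected here so vspltish can be used instead. */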
6582 bool
6583 xxspltib_constant_p (rtx op,
6584 machine_mode mode,
6585 int *num_insns_ptr,
6586 int *constant_ptr)
6588 size_t nunits = GET_MODE_NUNITS (mode);
6589 size_t i;
6590 HOST_WIDE_INT value;
6591 rtx element;
6593 /* Set the returned values to out-of-bounds values. */
6594 *num_insns_ptr = -1;
6595 *constant_ptr = 256;
6597 if (!TARGET_P9_VECTOR)
6598 return false;
6600 if (mode == VOIDmode)
6601 mode = GET_MODE (op);
6603 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6604 return false;
6606 /* Handle (vec_duplicate <constant>). */
6607 if (GET_CODE (op) == VEC_DUPLICATE)
6609 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6610 && mode != V2DImode)
6611 return false;
6613 element = XEXP (op, 0);
6614 if (!CONST_INT_P (element))
6615 return false;
6617 value = INTVAL (element);
6618 if (!IN_RANGE (value, -128, 127))
6619 return false;
6622 /* Handle (const_vector [...]). */
6623 else if (GET_CODE (op) == CONST_VECTOR)
6625 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6626 && mode != V2DImode)
6627 return false;
6629 element = CONST_VECTOR_ELT (op, 0);
6630 if (!CONST_INT_P (element))
6631 return false;
6633 value = INTVAL (element);
6634 if (!IN_RANGE (value, -128, 127))
6635 return false;
6637 for (i = 1; i < nunits; i++)
6639 element = CONST_VECTOR_ELT (op, i);
6640 if (!CONST_INT_P (element))
6641 return false;
6643 if (value != INTVAL (element))
6644 return false;
6648 /* Handle integer constants being loaded into the upper part of the VSX
6649 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6650 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6651 else if (CONST_INT_P (op))
6653 if (!SCALAR_INT_MODE_P (mode))
6654 return false;
6656 value = INTVAL (op);
6657 if (!IN_RANGE (value, -128, 127))
6658 return false;
6660 if (!IN_RANGE (value, -1, 0))
6662 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6663 return false;
6665 if (EASY_VECTOR_15 (value))
6666 return false;
6670 else
6671 return false;
6673 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6674 sign extend. Special case 0/-1 to allow getting any VSX register instead
6675 of an Altivec register. */
6676 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6677 && EASY_VECTOR_15 (value))
6678 return false;
6680 /* Return # of instructions and the constant byte for XXSPLTIB. */
6681 if (mode == V16QImode)
6682 *num_insns_ptr = 1;
6684 else if (IN_RANGE (value, -1, 0))
6685 *num_insns_ptr = 1;
6687 else
6688 *num_insns_ptr = 2;
6690 *constant_ptr = (int) value;
6691 return true;
6694 const char *
6695 output_vec_const_move (rtx *operands)
6697 int shift;
6698 machine_mode mode;
6699 rtx dest, vec;
6701 dest = operands[0];
6702 vec = operands[1];
6703 mode = GET_MODE (dest);
6705 if (TARGET_VSX)
6707 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6708 int xxspltib_value = 256;
6709 int num_insns = -1;
6711 if (zero_constant (vec, mode))
6713 if (TARGET_P9_VECTOR)
6714 return "xxspltib %x0,0";
6716 else if (dest_vmx_p)
6717 return "vspltisw %0,0";
6719 else
6720 return "xxlxor %x0,%x0,%x0";
6723 if (all_ones_constant (vec, mode))
6725 if (TARGET_P9_VECTOR)
6726 return "xxspltib %x0,255";
6728 else if (dest_vmx_p)
6729 return "vspltisw %0,-1";
6731 else if (TARGET_P8_VECTOR)
6732 return "xxlorc %x0,%x0,%x0";
6734 else
6735 gcc_unreachable ();
6738 if (TARGET_P9_VECTOR
6739 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6741 if (num_insns == 1)
6743 operands[2] = GEN_INT (xxspltib_value & 0xff);
6744 return "xxspltib %x0,%2";
6747 return "#";
6751 if (TARGET_ALTIVEC)
6753 rtx splat_vec;
6755 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6756 if (zero_constant (vec, mode))
6757 return "vspltisw %0,0";
6759 if (all_ones_constant (vec, mode))
6760 return "vspltisw %0,-1";
6762 /* Do we need to construct a value using VSLDOI? */
6763 shift = vspltis_shifted (vec);
6764 if (shift != 0)
6765 return "#";
6767 splat_vec = gen_easy_altivec_constant (vec);
6768 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6769 operands[1] = XEXP (splat_vec, 0);
6770 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6771 return "#";
6773 switch (GET_MODE (splat_vec))
6775 case E_V4SImode:
6776 return "vspltisw %0,%1";
6778 case E_V8HImode:
6779 return "vspltish %0,%1";
6781 case E_V16QImode:
6782 return "vspltisb %0,%1";
6784 default:
6785 gcc_unreachable ();
6789 gcc_unreachable ();
6792 /* Initialize the paired-float vector TARGET to VALS. */
6794 void
6795 paired_expand_vector_init (rtx target, rtx vals)
6797 machine_mode mode = GET_MODE (target);
6798 int n_elts = GET_MODE_NUNITS (mode);
6799 int n_var = 0;
6800 rtx x, new_rtx, tmp, constant_op, op1, op2;
6801 int i;
6803 for (i = 0; i < n_elts; ++i)
6805 x = XVECEXP (vals, 0, i);
6806 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6807 ++n_var;
6809 if (n_var == 0)
6811 /* Load from constant pool. */
6812 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6813 return;
6816 if (n_var == 2)
6818 /* The vector is initialized only with non-constants. */
6819 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6820 XVECEXP (vals, 0, 1));
6822 emit_move_insn (target, new_rtx);
6823 return;
6826 /* One field is non-constant and the other one is a constant. Load the
6827 constant from the constant pool and use the ps_merge instruction to
6828 construct the whole vector. */
6829 op1 = XVECEXP (vals, 0, 0);
6830 op2 = XVECEXP (vals, 0, 1);
6832 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6834 tmp = gen_reg_rtx (GET_MODE (constant_op));
6835 emit_move_insn (tmp, constant_op);
6837 if (CONSTANT_P (op1))
6838 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6839 else
6840 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6842 emit_move_insn (target, new_rtx);
6845 void
6846 paired_expand_vector_move (rtx operands[])
6848 rtx op0 = operands[0], op1 = operands[1];
6850 emit_move_insn (op0, op1);
6853 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6854 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6855 operands for the relation operation COND. This is a recursive
6856 function. */
6858 static void
6859 paired_emit_vector_compare (enum rtx_code rcode,
6860 rtx dest, rtx op0, rtx op1,
6861 rtx cc_op0, rtx cc_op1)
6863 rtx tmp = gen_reg_rtx (V2SFmode);
6864 rtx tmp1, max, min;
6866 gcc_assert (TARGET_PAIRED_FLOAT);
6867 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6869 switch (rcode)
6871 case LT:
6872 case LTU:
6873 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6874 return;
6875 case GE:
6876 case GEU:
6877 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6878 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6879 return;
6880 case LE:
6881 case LEU:
6882 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6883 return;
6884 case GT:
6885 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6886 return;
6887 case EQ:
6888 tmp1 = gen_reg_rtx (V2SFmode);
6889 max = gen_reg_rtx (V2SFmode);
6890 min = gen_reg_rtx (V2SFmode);
6893 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6894 emit_insn (gen_selv2sf4
6895 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6896 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6897 emit_insn (gen_selv2sf4
6898 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6899 emit_insn (gen_subv2sf3 (tmp1, min, max));
6900 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6901 return;
6902 case NE:
6903 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6904 return;
6905 case UNLE:
6906 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6907 return;
6908 case UNLT:
6909 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6910 return;
6911 case UNGE:
6912 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6913 return;
6914 case UNGT:
6915 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6916 return;
6917 default:
6918 gcc_unreachable ();
6921 return;
6924 /* Emit vector conditional expression.
6925 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6926 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6928 int
6929 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6930 rtx cond, rtx cc_op0, rtx cc_op1)
6932 enum rtx_code rcode = GET_CODE (cond);
6934 if (!TARGET_PAIRED_FLOAT)
6935 return 0;
6937 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6939 return 1;
6942 /* Initialize vector TARGET to VALS. */
6944 void
6945 rs6000_expand_vector_init (rtx target, rtx vals)
6947 machine_mode mode = GET_MODE (target);
6948 machine_mode inner_mode = GET_MODE_INNER (mode);
6949 int n_elts = GET_MODE_NUNITS (mode);
6950 int n_var = 0, one_var = -1;
6951 bool all_same = true, all_const_zero = true;
6952 rtx x, mem;
6953 int i;
6955 for (i = 0; i < n_elts; ++i)
6957 x = XVECEXP (vals, 0, i);
6958 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6959 ++n_var, one_var = i;
6960 else if (x != CONST0_RTX (inner_mode))
6961 all_const_zero = false;
6963 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6964 all_same = false;
6967 if (n_var == 0)
6969 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6970 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6971 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6973 /* Zero register. */
6974 emit_move_insn (target, CONST0_RTX (mode));
6975 return;
6977 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6979 /* Splat immediate. */
6980 emit_insn (gen_rtx_SET (target, const_vec));
6981 return;
6983 else
6985 /* Load from constant pool. */
6986 emit_move_insn (target, const_vec);
6987 return;
6991 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6992 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6994 rtx op[2];
6995 size_t i;
6996 size_t num_elements = all_same ? 1 : 2;
6997 for (i = 0; i < num_elements; i++)
6999 op[i] = XVECEXP (vals, 0, i);
7000 /* Just in case there is a SUBREG with a smaller mode, do a
7001 conversion. */
7002 if (GET_MODE (op[i]) != inner_mode)
7004 rtx tmp = gen_reg_rtx (inner_mode);
7005 convert_move (tmp, op[i], 0);
7006 op[i] = tmp;
7008 /* Allow load with splat double word. */
7009 else if (MEM_P (op[i]))
7011 if (!all_same)
7012 op[i] = force_reg (inner_mode, op[i]);
7014 else if (!REG_P (op[i]))
7015 op[i] = force_reg (inner_mode, op[i]);
7018 if (all_same)
7020 if (mode == V2DFmode)
7021 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7022 else
7023 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7025 else
7027 if (mode == V2DFmode)
7028 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7029 else
7030 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7032 return;
7035 /* Special case initializing vector int if we are on 64-bit systems with
7036 direct move or we have the ISA 3.0 instructions. */
7037 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7038 && TARGET_DIRECT_MOVE_64BIT)
7040 if (all_same)
7042 rtx element0 = XVECEXP (vals, 0, 0);
7043 if (MEM_P (element0))
7044 element0 = rs6000_address_for_fpconvert (element0);
7045 else
7046 element0 = force_reg (SImode, element0);
7048 if (TARGET_P9_VECTOR)
7049 emit_insn (gen_vsx_splat_v4si (target, element0));
7050 else
7052 rtx tmp = gen_reg_rtx (DImode);
7053 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7054 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7056 return;
7058 else
7060 rtx elements[4];
7061 size_t i;
7063 for (i = 0; i < 4; i++)
7065 elements[i] = XVECEXP (vals, 0, i);
7066 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7067 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7070 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7071 elements[2], elements[3]));
7072 return;
7076 /* With single-precision floating point on VSX, we know that internally single
7077 precision is actually represented as a double, so either make 2 V2DF
7078 vectors and convert these vectors to single precision, or do one
7079 conversion, and splat the result to the other elements. */
7080 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7082 if (all_same)
7084 rtx element0 = XVECEXP (vals, 0, 0);
7086 if (TARGET_P9_VECTOR)
7088 if (MEM_P (element0))
7089 element0 = rs6000_address_for_fpconvert (element0);
7091 emit_insn (gen_vsx_splat_v4sf (target, element0));
7094 else
7096 rtx freg = gen_reg_rtx (V4SFmode);
7097 rtx sreg = force_reg (SFmode, element0);
7098 rtx cvt = (TARGET_XSCVDPSPN
7099 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7100 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7102 emit_insn (cvt);
7103 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7104 const0_rtx));
7107 else
7109 rtx dbl_even = gen_reg_rtx (V2DFmode);
7110 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7111 rtx flt_even = gen_reg_rtx (V4SFmode);
7112 rtx flt_odd = gen_reg_rtx (V4SFmode);
7113 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7114 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7115 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7116 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7118 /* Use VMRGEW if we can instead of doing a permute. */
7119 if (TARGET_P8_VECTOR)
7121 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7122 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7123 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7124 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7125 if (BYTES_BIG_ENDIAN)
7126 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7127 else
7128 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7130 else
7132 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7133 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7134 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7135 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7136 rs6000_expand_extract_even (target, flt_even, flt_odd);
7139 return;
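/* Worked example (a sketch, big-endian power8): for {a, b, c, d} the
   code above builds dbl_even = {a, c} and dbl_odd = {b, d}, converts
   each pair to single precision with xvcvdpsp (the results land in the
   even words), then vmrgew interleaves the even words of the two
   results, yielding {a, b, c, d} in TARGET.  */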
7142 /* Special case initializing vector short/char that are splats if we are on
7143 64-bit systems with direct move. */
7144 if (all_same && TARGET_DIRECT_MOVE_64BIT
7145 && (mode == V16QImode || mode == V8HImode))
7147 rtx op0 = XVECEXP (vals, 0, 0);
7148 rtx di_tmp = gen_reg_rtx (DImode);
7150 if (!REG_P (op0))
7151 op0 = force_reg (GET_MODE_INNER (mode), op0);
7153 if (mode == V16QImode)
7155 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7156 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7157 return;
7160 if (mode == V8HImode)
7162 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7163 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7164 return;
7168 /* Store value to stack temp. Load vector element. Splat. However, splat
7169 of 64-bit items is not supported on Altivec. */
7170 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7172 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7173 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7174 XVECEXP (vals, 0, 0));
7175 x = gen_rtx_UNSPEC (VOIDmode,
7176 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7177 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7178 gen_rtvec (2,
7179 gen_rtx_SET (target, mem),
7180 x)));
7181 x = gen_rtx_VEC_SELECT (inner_mode, target,
7182 gen_rtx_PARALLEL (VOIDmode,
7183 gen_rtvec (1, const0_rtx)));
7184 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7185 return;
7188 /* One field is non-constant. Load constant then overwrite
7189 varying field. */
7190 if (n_var == 1)
7192 rtx copy = copy_rtx (vals);
7194 /* Load constant part of vector, substitute neighboring value for
7195 varying element. */
7196 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7197 rs6000_expand_vector_init (target, copy);
7199 /* Insert variable. */
7200 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7201 return;
7204 /* Construct the vector in memory one field at a time
7205 and load the whole vector. */
7206 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7207 for (i = 0; i < n_elts; i++)
7208 emit_move_insn (adjust_address_nv (mem, inner_mode,
7209 i * GET_MODE_SIZE (inner_mode)),
7210 XVECEXP (vals, 0, i));
7211 emit_move_insn (target, mem);
7214 /* Set field ELT of TARGET to VAL. */
7216 void
7217 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7219 machine_mode mode = GET_MODE (target);
7220 machine_mode inner_mode = GET_MODE_INNER (mode);
7221 rtx reg = gen_reg_rtx (mode);
7222 rtx mask, mem, x;
7223 int width = GET_MODE_SIZE (inner_mode);
7224 int i;
7226 val = force_reg (GET_MODE (val), val);
7228 if (VECTOR_MEM_VSX_P (mode))
7230 rtx insn = NULL_RTX;
7231 rtx elt_rtx = GEN_INT (elt);
7233 if (mode == V2DFmode)
7234 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7236 else if (mode == V2DImode)
7237 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7239 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7241 if (mode == V4SImode)
7242 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7243 else if (mode == V8HImode)
7244 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7245 else if (mode == V16QImode)
7246 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7247 else if (mode == V4SFmode)
7248 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7251 if (insn)
7253 emit_insn (insn);
7254 return;
7258 /* Simplify setting single element vectors like V1TImode. */
7259 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7261 emit_move_insn (target, gen_lowpart (mode, val));
7262 return;
7265 /* Load single variable value. */
7266 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7267 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7268 x = gen_rtx_UNSPEC (VOIDmode,
7269 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7270 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7271 gen_rtvec (2,
7272 gen_rtx_SET (reg, mem),
7273 x)));
7275 /* Linear sequence. */
7276 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7277 for (i = 0; i < 16; ++i)
7278 XVECEXP (mask, 0, i) = GEN_INT (i);
7280 /* Set permute mask to insert element into target. */
7281 for (i = 0; i < width; ++i)
7282 XVECEXP (mask, 0, elt*width + i)
7283 = GEN_INT (i + 0x10);
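/* For example (a sketch): V4SImode and ELT == 1 gives the selector
   { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }, i.e. an identity
   permute of TARGET except for bytes 4..7, which are taken from the
   new value in REG.  */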
7284 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7286 if (BYTES_BIG_ENDIAN)
7287 x = gen_rtx_UNSPEC (mode,
7288 gen_rtvec (3, target, reg,
7289 force_reg (V16QImode, x)),
7290 UNSPEC_VPERM);
7291 else
7293 if (TARGET_P9_VECTOR)
7294 x = gen_rtx_UNSPEC (mode,
7295 gen_rtvec (3, reg, target,
7296 force_reg (V16QImode, x)),
7297 UNSPEC_VPERMR);
7298 else
7300 /* Invert selector. We prefer to generate VNAND on P8 so
7301 that future fusion opportunities can kick in, but must
7302 generate VNOR elsewhere. */
7303 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7304 rtx iorx = (TARGET_P8_VECTOR
7305 ? gen_rtx_IOR (V16QImode, notx, notx)
7306 : gen_rtx_AND (V16QImode, notx, notx));
7307 rtx tmp = gen_reg_rtx (V16QImode);
7308 emit_insn (gen_rtx_SET (tmp, iorx));
7310 /* Permute with operands reversed and adjusted selector. */
7311 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7312 UNSPEC_VPERM);
7316 emit_insn (gen_rtx_SET (target, x));
7319 /* Extract field ELT from VEC into TARGET. */
7321 void
7322 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7324 machine_mode mode = GET_MODE (vec);
7325 machine_mode inner_mode = GET_MODE_INNER (mode);
7326 rtx mem;
7328 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7330 switch (mode)
7332 default:
7333 break;
7334 case E_V1TImode:
7335 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7336 emit_move_insn (target, gen_lowpart (TImode, vec));
7337 break;
7338 case E_V2DFmode:
7339 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7340 return;
7341 case E_V2DImode:
7342 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7343 return;
7344 case E_V4SFmode:
7345 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7346 return;
7347 case E_V16QImode:
7348 if (TARGET_DIRECT_MOVE_64BIT)
7350 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7351 return;
7353 else
7354 break;
7355 case E_V8HImode:
7356 if (TARGET_DIRECT_MOVE_64BIT)
7358 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7359 return;
7361 else
7362 break;
7363 case E_V4SImode:
7364 if (TARGET_DIRECT_MOVE_64BIT)
7366 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7367 return;
7369 break;
7372 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7373 && TARGET_DIRECT_MOVE_64BIT)
7375 if (GET_MODE (elt) != DImode)
7377 rtx tmp = gen_reg_rtx (DImode);
7378 convert_move (tmp, elt, 0);
7379 elt = tmp;
7381 else if (!REG_P (elt))
7382 elt = force_reg (DImode, elt);
7384 switch (mode)
7386 case E_V2DFmode:
7387 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7388 return;
7390 case E_V2DImode:
7391 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7392 return;
7394 case E_V4SFmode:
7395 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7396 return;
7398 case E_V4SImode:
7399 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7400 return;
7402 case E_V8HImode:
7403 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7404 return;
7406 case E_V16QImode:
7407 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7408 return;
7410 default:
7411 gcc_unreachable ();
7415 gcc_assert (CONST_INT_P (elt));
7417 /* Allocate mode-sized buffer. */
7418 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7420 emit_move_insn (mem, vec);
7422 /* Add offset to field within buffer matching vector element. */
7423 mem = adjust_address_nv (mem, inner_mode,
7424 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7426 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
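/* A sketch of this fallback: for a V4SI extract of element 2 on a
   target without the direct sequences above, the vector is stored to a
   16-byte stack slot and the scalar is reloaded from byte offset
   2 * 4 == 8 within the slot.  */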
7429 /* Helper function to return the register number of an RTX. */
7430 static inline int
7431 regno_or_subregno (rtx op)
7433 if (REG_P (op))
7434 return REGNO (op);
7435 else if (SUBREG_P (op))
7436 return subreg_regno (op);
7437 else
7438 gcc_unreachable ();
7441 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7442 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7443 temporary (BASE_TMP) to fixup the address. Return the new memory address
7444 that is valid for reads or writes to a given register (SCALAR_REG). */
7446 static rtx
7447 rs6000_adjust_vec_address (rtx scalar_reg,
7448 rtx mem,
7449 rtx element,
7450 rtx base_tmp,
7451 machine_mode scalar_mode)
7453 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7454 rtx addr = XEXP (mem, 0);
7455 rtx element_offset;
7456 rtx new_addr;
7457 bool valid_addr_p;
7459 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7460 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7462 /* Calculate what we need to add to the address to get the element
7463 address. */
7464 if (CONST_INT_P (element))
7465 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7466 else
7468 int byte_shift = exact_log2 (scalar_size);
7469 gcc_assert (byte_shift >= 0);
7471 if (byte_shift == 0)
7472 element_offset = element;
7474 else
7476 if (TARGET_POWERPC64)
7477 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7478 else
7479 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7481 element_offset = base_tmp;
7485 /* Create the new address pointing to the element within the vector. If we
7486 are adding 0, we don't have to change the address. */
7487 if (element_offset == const0_rtx)
7488 new_addr = addr;
7490 /* A simple indirect address can be converted into a reg + offset
7491 address. */
7492 else if (REG_P (addr) || SUBREG_P (addr))
7493 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7495 /* Optimize D-FORM addresses with a constant offset and a constant element
7496 number, to include the element offset in the address directly. */
7497 else if (GET_CODE (addr) == PLUS)
7499 rtx op0 = XEXP (addr, 0);
7500 rtx op1 = XEXP (addr, 1);
7501 rtx insn;
7503 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7504 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7506 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7507 rtx offset_rtx = GEN_INT (offset);
7509 if (IN_RANGE (offset, -32768, 32767)
7510 && (scalar_size < 8 || (offset & 0x3) == 0))
7511 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7512 else
7514 emit_move_insn (base_tmp, offset_rtx);
7515 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7518 else
7520 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7521 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7523 /* Note, ADDI requires the register being added to be a base
7524 register. If the register was R0, load it up into the temporary
7525 and do the add. */
7526 if (op1_reg_p
7527 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7529 insn = gen_add3_insn (base_tmp, op1, element_offset);
7530 gcc_assert (insn != NULL_RTX);
7531 emit_insn (insn);
7534 else if (ele_reg_p
7535 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7537 insn = gen_add3_insn (base_tmp, element_offset, op1);
7538 gcc_assert (insn != NULL_RTX);
7539 emit_insn (insn);
7542 else
7544 emit_move_insn (base_tmp, op1);
7545 emit_insn (gen_add2_insn (base_tmp, element_offset));
7548 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7552 else
7554 emit_move_insn (base_tmp, addr);
7555 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7558 /* If we have a PLUS, we need to see whether the particular register class
7559 allows for D-FORM or X-FORM addressing. */
7560 if (GET_CODE (new_addr) == PLUS)
7562 rtx op1 = XEXP (new_addr, 1);
7563 addr_mask_type addr_mask;
7564 int scalar_regno = regno_or_subregno (scalar_reg);
7566 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7567 if (INT_REGNO_P (scalar_regno))
7568 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7570 else if (FP_REGNO_P (scalar_regno))
7571 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7573 else if (ALTIVEC_REGNO_P (scalar_regno))
7574 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7576 else
7577 gcc_unreachable ();
7579 if (REG_P (op1) || SUBREG_P (op1))
7580 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7581 else
7582 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7585 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7586 valid_addr_p = true;
7588 else
7589 valid_addr_p = false;
7591 if (!valid_addr_p)
7593 emit_move_insn (base_tmp, new_addr);
7594 new_addr = base_tmp;
7597 return change_address (mem, scalar_mode, new_addr);
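/* Worked example (a sketch, assuming reg+offset addressing is valid for
   the scalar register class): MEM = (mem:V4SI (plus (reg 9)
   (const_int 16))), ELEMENT = 2 and SCALAR_MODE = SImode give an
   element offset of 8, and the returned memory is
   (mem:SI (plus (reg 9) (const_int 24))).  */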
7600 /* Split a variable vec_extract operation into the component instructions. */
7602 void
7603 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7604 rtx tmp_altivec)
7606 machine_mode mode = GET_MODE (src);
7607 machine_mode scalar_mode = GET_MODE (dest);
7608 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7609 int byte_shift = exact_log2 (scalar_size);
7611 gcc_assert (byte_shift >= 0);
7613 /* If we are given a memory address, optimize to load just the element. We
7614 don't have to adjust the vector element number on little endian
7615 systems. */
7616 if (MEM_P (src))
7618 gcc_assert (REG_P (tmp_gpr));
7619 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7620 tmp_gpr, scalar_mode));
7621 return;
7624 else if (REG_P (src) || SUBREG_P (src))
7626 int bit_shift = byte_shift + 3;
7627 rtx element2;
7628 int dest_regno = regno_or_subregno (dest);
7629 int src_regno = regno_or_subregno (src);
7630 int element_regno = regno_or_subregno (element);
7632 gcc_assert (REG_P (tmp_gpr));
7634 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7635 a general purpose register. */
7636 if (TARGET_P9_VECTOR
7637 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7638 && INT_REGNO_P (dest_regno)
7639 && ALTIVEC_REGNO_P (src_regno)
7640 && INT_REGNO_P (element_regno))
7642 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7643 rtx element_si = gen_rtx_REG (SImode, element_regno);
7645 if (mode == V16QImode)
7646 emit_insn (VECTOR_ELT_ORDER_BIG
7647 ? gen_vextublx (dest_si, element_si, src)
7648 : gen_vextubrx (dest_si, element_si, src));
7650 else if (mode == V8HImode)
7652 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7653 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7654 emit_insn (VECTOR_ELT_ORDER_BIG
7655 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7656 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7660 else
7662 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7663 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7664 emit_insn (VECTOR_ELT_ORDER_BIG
7665 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7666 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7669 return;
7673 gcc_assert (REG_P (tmp_altivec));
7675 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7676 an XOR, otherwise we need to subtract. The shift amount is chosen so
7677 that VSLO will shift the element into the upper position (adding 3
7678 converts a byte shift into a bit shift). */
7679 if (scalar_size == 8)
7681 if (!VECTOR_ELT_ORDER_BIG)
7683 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7684 element2 = tmp_gpr;
7686 else
7687 element2 = element;
7689 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7690 bit. */
7691 emit_insn (gen_rtx_SET (tmp_gpr,
7692 gen_rtx_AND (DImode,
7693 gen_rtx_ASHIFT (DImode,
7694 element2,
7695 GEN_INT (6)),
7696 GEN_INT (64))));
7698 else
7700 if (!VECTOR_ELT_ORDER_BIG)
7702 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7704 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7705 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7706 element2 = tmp_gpr;
7708 else
7709 element2 = element;
7711 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
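/* E.g. (a sketch) extracting byte 5 of a V16QI on little endian:
   tmp_gpr = 15 - (5 & 15) = 10, and 10 << 3 gives the bit shift of 80
   that VSLO needs to move the element to the top.  */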
7714 /* Get the value into the lower byte of the Altivec register where VSLO
7715 expects it. */
7716 if (TARGET_P9_VECTOR)
7717 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7718 else if (can_create_pseudo_p ())
7719 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7720 else
7722 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7723 emit_move_insn (tmp_di, tmp_gpr);
7724 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7727 /* Do the VSLO to get the value into the final location. */
7728 switch (mode)
7730 case E_V2DFmode:
7731 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7732 return;
7734 case E_V2DImode:
7735 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7736 return;
7738 case E_V4SFmode:
7740 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7741 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7742 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7743 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7744 tmp_altivec));
7746 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7747 return;
7750 case E_V4SImode:
7751 case E_V8HImode:
7752 case E_V16QImode:
7754 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7755 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7756 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7757 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7758 tmp_altivec));
7759 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7760 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7761 GEN_INT (64 - (8 * scalar_size))));
7762 return;
7765 default:
7766 gcc_unreachable ();
7769 return;
7771 else
7772 gcc_unreachable ();
7775 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7776 two SImode values. */
7778 static void
7779 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7781 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7783 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7785 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7786 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7788 emit_move_insn (dest, GEN_INT (const1 | const2));
7789 return;
7792 /* Put si1 into upper 32-bits of dest. */
7793 if (CONST_INT_P (si1))
7794 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7795 else
7797 /* Generate RLDIC. */
7798 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7799 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7800 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7801 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7802 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7803 emit_insn (gen_rtx_SET (dest, and_rtx));
7806 /* Put si2 into the temporary. */
7807 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7808 if (CONST_INT_P (si2))
7809 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7810 else
7811 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7813 /* Combine the two parts. */
7814 emit_insn (gen_iordi3 (dest, dest, tmp));
7815 return;
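/* For instance (a sketch): si1 = 0x11111111 and si2 = 0x22222222
   produce dest = 0x1111111122222222, built as (si1 << 32) | zext (si2),
   unless both halves are constant, in which case a single move of the
   combined constant is emitted.  */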
7818 /* Split a V4SI initialization. */
7820 void
7821 rs6000_split_v4si_init (rtx operands[])
7823 rtx dest = operands[0];
7825 /* Destination is a GPR, build up the two DImode parts in place. */
7826 if (REG_P (dest) || SUBREG_P (dest))
7828 int d_regno = regno_or_subregno (dest);
7829 rtx scalar1 = operands[1];
7830 rtx scalar2 = operands[2];
7831 rtx scalar3 = operands[3];
7832 rtx scalar4 = operands[4];
7833 rtx tmp1 = operands[5];
7834 rtx tmp2 = operands[6];
7836 /* Even though we only need one temporary (plus the destination, which
7837 has an early clobber constraint), try to use two temporaries, one for
7838 each double word created. That way the 2nd insn scheduling pass can
7839 rearrange things so the two parts are done in parallel. */
7840 if (BYTES_BIG_ENDIAN)
7842 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7843 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7844 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7845 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7847 else
7849 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7850 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7851 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7852 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7853 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7855 return;
7858 else
7859 gcc_unreachable ();
7862 /* Return the alignment of TYPE. Existing alignment is ALIGN. HOW
7863 selects whether the alignment is ABI-mandated, optional, or
7864 both ABI-mandated and optional alignment. */
7866 unsigned int
7867 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7869 if (how != align_opt)
7871 if (TREE_CODE (type) == VECTOR_TYPE)
7873 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7875 if (align < 64)
7876 align = 64;
7878 else if (align < 128)
7879 align = 128;
7883 if (how != align_abi)
7885 if (TREE_CODE (type) == ARRAY_TYPE
7886 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7888 if (align < BITS_PER_WORD)
7889 align = BITS_PER_WORD;
7893 return align;
7896 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7897 instructions simply ignore the low bits; VSX memory instructions
7898 are aligned to 4 or 8 bytes. */
7900 static bool
7901 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7903 return (STRICT_ALIGNMENT
7904 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7905 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7906 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7907 && (int) align < VECTOR_ALIGN (mode)))));
7910 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7912 bool
7913 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7915 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7917 if (computed != 128)
7919 static bool warned;
7920 if (!warned && warn_psabi)
7922 warned = true;
7923 inform (input_location,
7924 "the layout of aggregates containing vectors with"
7925 " %d-byte alignment has changed in GCC 5",
7926 computed / BITS_PER_UNIT);
7929 /* In current GCC there is no special case. */
7930 return false;
7933 return false;
7936 /* AIX increases natural record alignment to doubleword if the first
7937 field is an FP double, while the FP fields themselves remain word aligned. */
7939 unsigned int
7940 rs6000_special_round_type_align (tree type, unsigned int computed,
7941 unsigned int specified)
7943 unsigned int align = MAX (computed, specified);
7944 tree field = TYPE_FIELDS (type);
7946 /* Skip all non-field decls. */
7947 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7948 field = DECL_CHAIN (field);
7950 if (field != NULL && field != type)
7952 type = TREE_TYPE (field);
7953 while (TREE_CODE (type) == ARRAY_TYPE)
7954 type = TREE_TYPE (type);
7956 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7957 align = MAX (align, 64);
7960 return align;
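/* For example (a sketch): struct { double d; int i; } is raised to
   doubleword (64-bit) alignment here because its first field is a
   DFmode double.  */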
7963 /* Darwin increases record alignment to the natural alignment of
7964 the first field. */
7966 unsigned int
7967 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7968 unsigned int specified)
7970 unsigned int align = MAX (computed, specified);
7972 if (TYPE_PACKED (type))
7973 return align;
7975 /* Find the first field, looking down into aggregates. */
7976 do {
7977 tree field = TYPE_FIELDS (type);
7978 /* Skip all non-field decls. */
7979 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7980 field = DECL_CHAIN (field);
7981 if (! field)
7982 break;
7983 /* A packed field does not contribute any extra alignment. */
7984 if (DECL_PACKED (field))
7985 return align;
7986 type = TREE_TYPE (field);
7987 while (TREE_CODE (type) == ARRAY_TYPE)
7988 type = TREE_TYPE (type);
7989 } while (AGGREGATE_TYPE_P (type));
7991 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7992 align = MAX (align, TYPE_ALIGN (type));
7994 return align;
7997 /* Return 1 for an operand in small memory on V.4/eabi. */
7999 int
8000 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8001 machine_mode mode ATTRIBUTE_UNUSED)
8003 #if TARGET_ELF
8004 rtx sym_ref;
8006 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8007 return 0;
8009 if (DEFAULT_ABI != ABI_V4)
8010 return 0;
8012 if (GET_CODE (op) == SYMBOL_REF)
8013 sym_ref = op;
8015 else if (GET_CODE (op) != CONST
8016 || GET_CODE (XEXP (op, 0)) != PLUS
8017 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8018 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8019 return 0;
8021 else
8023 rtx sum = XEXP (op, 0);
8024 HOST_WIDE_INT summand;
8026 /* We have to be careful here, because it is the referenced address
8027 that must be 32k from _SDA_BASE_, not just the symbol. */
8028 summand = INTVAL (XEXP (sum, 1));
8029 if (summand < 0 || summand > g_switch_value)
8030 return 0;
8032 sym_ref = XEXP (sum, 0);
8035 return SYMBOL_REF_SMALL_P (sym_ref);
8036 #else
8037 return 0;
8038 #endif
8041 /* Return true if either operand is a general purpose register. */
8043 bool
8044 gpr_or_gpr_p (rtx op0, rtx op1)
8046 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8047 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8050 /* Return true if this is a move direct operation between GPR registers and
8051 floating point/VSX registers. */
8053 bool
8054 direct_move_p (rtx op0, rtx op1)
8056 int regno0, regno1;
8058 if (!REG_P (op0) || !REG_P (op1))
8059 return false;
8061 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8062 return false;
8064 regno0 = REGNO (op0);
8065 regno1 = REGNO (op1);
8066 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8067 return false;
8069 if (INT_REGNO_P (regno0))
8070 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8072 else if (INT_REGNO_P (regno1))
8074 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8075 return true;
8077 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8078 return true;
8081 return false;
8084 /* Return true if the OFFSET is valid for the quad address instructions that
8085 use d-form (register + offset) addressing. */
8087 static inline bool
8088 quad_address_offset_p (HOST_WIDE_INT offset)
8090 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
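/* I.e. (a sketch) a DQ-form displacement: a signed 16-bit offset that
   is also a multiple of 16, so 32 qualifies while 8 or 32768 do not.  */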
8093 /* Return true if the ADDR is an acceptable address for a quad memory
8094 operation of mode MODE (either LQ/STQ for general purpose registers, or
8095 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
8096 address must be strictly valid after reload (only hard registers are
8097 acceptable as base registers); if false, pseudos may also serve as bases. */
8099 bool
8100 quad_address_p (rtx addr, machine_mode mode, bool strict)
8102 rtx op0, op1;
8104 if (GET_MODE_SIZE (mode) != 16)
8105 return false;
8107 if (legitimate_indirect_address_p (addr, strict))
8108 return true;
8110 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8111 return false;
8113 if (GET_CODE (addr) != PLUS)
8114 return false;
8116 op0 = XEXP (addr, 0);
8117 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8118 return false;
8120 op1 = XEXP (addr, 1);
8121 if (!CONST_INT_P (op1))
8122 return false;
8124 return quad_address_offset_p (INTVAL (op1));
8127 /* Return true if this is a load or store quad operation. This function does
8128 not handle the atomic quad memory instructions. */
8130 bool
8131 quad_load_store_p (rtx op0, rtx op1)
8133 bool ret;
8135 if (!TARGET_QUAD_MEMORY)
8136 ret = false;
8138 else if (REG_P (op0) && MEM_P (op1))
8139 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8140 && quad_memory_operand (op1, GET_MODE (op1))
8141 && !reg_overlap_mentioned_p (op0, op1));
8143 else if (MEM_P (op0) && REG_P (op1))
8144 ret = (quad_memory_operand (op0, GET_MODE (op0))
8145 && quad_int_reg_operand (op1, GET_MODE (op1)));
8147 else
8148 ret = false;
8150 if (TARGET_DEBUG_ADDR)
8152 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8153 ret ? "true" : "false");
8154 debug_rtx (gen_rtx_SET (op0, op1));
8157 return ret;
8160 /* Given an address, return a constant offset term if one exists. */
8162 static rtx
8163 address_offset (rtx op)
8165 if (GET_CODE (op) == PRE_INC
8166 || GET_CODE (op) == PRE_DEC)
8167 op = XEXP (op, 0);
8168 else if (GET_CODE (op) == PRE_MODIFY
8169 || GET_CODE (op) == LO_SUM)
8170 op = XEXP (op, 1);
8172 if (GET_CODE (op) == CONST)
8173 op = XEXP (op, 0);
8175 if (GET_CODE (op) == PLUS)
8176 op = XEXP (op, 1);
8178 if (CONST_INT_P (op))
8179 return op;
8181 return NULL_RTX;
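/* E.g. (a sketch): (plus (reg 3) (const_int 8)) yields (const_int 8),
   while a plain (reg 3) yields NULL_RTX.  */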
8184 /* Return true if the MEM operand is a memory operand suitable for use
8185 with a (full width, possibly multiple) gpr load/store. On
8186 powerpc64 this means the offset must be divisible by 4.
8187 Implements 'Y' constraint.
8189 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8190 a constraint function we know the operand has satisfied a suitable
8191 memory predicate. Also accept some odd rtl generated by reload
8192 (see rs6000_legitimize_reload_address for various forms). It is
8193 important that reload rtl be accepted by appropriate constraints
8194 but not by the operand predicate.
8196 Offsetting a lo_sum should not be allowed, except where we know by
8197 alignment that a 32k boundary is not crossed, but see the ???
8198 comment in rs6000_legitimize_reload_address. Note that by
8199 "offsetting" here we mean a further offset to access parts of the
8200 MEM. It's fine to have a lo_sum where the inner address is offset
8201 from a sym, since the same sym+offset will appear in the high part
8202 of the address calculation. */
8204 bool
8205 mem_operand_gpr (rtx op, machine_mode mode)
8207 unsigned HOST_WIDE_INT offset;
8208 int extra;
8209 rtx addr = XEXP (op, 0);
8211 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
8212 if (!rs6000_offsettable_memref_p (op, mode, false))
8213 return false;
8215 op = address_offset (addr);
8216 if (op == NULL_RTX)
8217 return true;
8219 offset = INTVAL (op);
8220 if (TARGET_POWERPC64 && (offset & 3) != 0)
8221 return false;
8223 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8224 if (extra < 0)
8225 extra = 0;
8227 if (GET_CODE (addr) == LO_SUM)
8228 /* For lo_sum addresses, we must allow any offset except one that
8229 causes a wrap, so test only the low 16 bits. */
8230 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8232 return offset + 0x8000 < 0x10000u - extra;
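/* A worked bound (a sketch, 64-bit): for TImode, extra = 16 - 8 = 8,
   so the offset must satisfy offset < 0x8000 - 8; with the low two
   bits clear, the largest accepted positive offset is 32756.  */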
8235 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8236 enforce an offset divisible by 4 even for 32-bit. */
8238 bool
8239 mem_operand_ds_form (rtx op, machine_mode mode)
8241 unsigned HOST_WIDE_INT offset;
8242 int extra;
8243 rtx addr = XEXP (op, 0);
8245 if (!offsettable_address_p (false, mode, addr))
8246 return false;
8248 op = address_offset (addr);
8249 if (op == NULL_RTX)
8250 return true;
8252 offset = INTVAL (op);
8253 if ((offset & 3) != 0)
8254 return false;
8256 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8257 if (extra < 0)
8258 extra = 0;
8260 if (GET_CODE (addr) == LO_SUM)
8261 /* For lo_sum addresses, we must allow any offset except one that
8262 causes a wrap, so test only the low 16 bits. */
8263 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8265 return offset + 0x8000 < 0x10000u - extra;
8268 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8270 static bool
8271 reg_offset_addressing_ok_p (machine_mode mode)
8273 switch (mode)
8275 case E_V16QImode:
8276 case E_V8HImode:
8277 case E_V4SFmode:
8278 case E_V4SImode:
8279 case E_V2DFmode:
8280 case E_V2DImode:
8281 case E_V1TImode:
8282 case E_TImode:
8283 case E_TFmode:
8284 case E_KFmode:
8285 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8286 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8287 a vector mode, if we want to use the VSX registers to move it around,
8288 we need to restrict ourselves to reg+reg addressing. Similarly for
8289 IEEE 128-bit floating point that is passed in a single vector
8290 register. */
8291 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8292 return mode_supports_vsx_dform_quad (mode);
8293 break;
8295 case E_V2SImode:
8296 case E_V2SFmode:
8297 /* Paired vector modes. Only reg+reg addressing is valid. */
8298 if (TARGET_PAIRED_FLOAT)
8299 return false;
8300 break;
8302 case E_SDmode:
8303 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8304 addressing for the LFIWZX and STFIWX instructions. */
8305 if (TARGET_NO_SDMODE_STACK)
8306 return false;
8307 break;
8309 default:
8310 break;
8313 return true;
8316 static bool
8317 virtual_stack_registers_memory_p (rtx op)
8319 int regnum;
8321 if (GET_CODE (op) == REG)
8322 regnum = REGNO (op);
8324 else if (GET_CODE (op) == PLUS
8325 && GET_CODE (XEXP (op, 0)) == REG
8326 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8327 regnum = REGNO (XEXP (op, 0));
8329 else
8330 return false;
8332 return (regnum >= FIRST_VIRTUAL_REGISTER
8333 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8336 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8337 is known to not straddle a 32k boundary. This function is used
8338 to determine whether -mcmodel=medium code can use TOC pointer
8339 relative addressing for OP. This means the alignment of the TOC
8340 pointer must also be taken into account, and unfortunately that is
8341 only 8 bytes. */
8343 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8344 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8345 #endif
8347 static bool
8348 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8349 machine_mode mode)
8351 tree decl;
8352 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8354 if (GET_CODE (op) != SYMBOL_REF)
8355 return false;
8357 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8358 SYMBOL_REF. */
8359 if (mode_supports_vsx_dform_quad (mode))
8360 return false;
8362 dsize = GET_MODE_SIZE (mode);
8363 decl = SYMBOL_REF_DECL (op);
8364 if (!decl)
8366 if (dsize == 0)
8367 return false;
8369 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8370 replacing memory addresses with an anchor plus offset. We
8371 could find the decl by rummaging around in the block->objects
8372 VEC for the given offset but that seems like too much work. */
8373 dalign = BITS_PER_UNIT;
8374 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8375 && SYMBOL_REF_ANCHOR_P (op)
8376 && SYMBOL_REF_BLOCK (op) != NULL)
8378 struct object_block *block = SYMBOL_REF_BLOCK (op);
8380 dalign = block->alignment;
8381 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8383 else if (CONSTANT_POOL_ADDRESS_P (op))
8385 /* It would be nice to have a get_pool_align()... */
8386 machine_mode cmode = get_pool_mode (op);
8388 dalign = GET_MODE_ALIGNMENT (cmode);
8391 else if (DECL_P (decl))
8393 dalign = DECL_ALIGN (decl);
8395 if (dsize == 0)
8397 /* Allow BLKmode when the entire object is known to not
8398 cross a 32k boundary. */
8399 if (!DECL_SIZE_UNIT (decl))
8400 return false;
8402 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8403 return false;
8405 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8406 if (dsize > 32768)
8407 return false;
8409 dalign /= BITS_PER_UNIT;
8410 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8411 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8412 return dalign >= dsize;
8415 else
8416 gcc_unreachable ();
8418 /* Find how many bits of the alignment we know for this access. */
8419 dalign /= BITS_PER_UNIT;
8420 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8421 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8422 mask = dalign - 1;
8423 lsb = offset & -offset;
8424 mask &= lsb - 1;
8425 dalign = mask + 1;
8427 return dalign >= dsize;
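/* Worked example (a sketch): dalign = 8 bytes and offset = 4 give
   lsb = 4, so only 4 bytes of alignment are known at the access; a
   dsize of 4 passes while a dsize of 8 is rejected.  */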
8430 static bool
8431 constant_pool_expr_p (rtx op)
8433 rtx base, offset;
8435 split_const (op, &base, &offset);
8436 return (GET_CODE (base) == SYMBOL_REF
8437 && CONSTANT_POOL_ADDRESS_P (base)
8438 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8441 /* These are only used to pass through from print_operand/print_operand_address
8442 to rs6000_output_addr_const_extra over the intervening function
8443 output_addr_const which is not target code. */
8444 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8446 /* Return true if OP is a toc pointer relative address (the output
8447 of create_TOC_reference). If STRICT, do not match non-split
8448 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8449 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8450 TOCREL_OFFSET_RET respectively. */
8452 bool
8453 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8454 const_rtx *tocrel_offset_ret)
8456 if (!TARGET_TOC)
8457 return false;
8459 if (TARGET_CMODEL != CMODEL_SMALL)
8461 /* When strict, ensure we have everything tidy. */
8462 if (strict
8463 && !(GET_CODE (op) == LO_SUM
8464 && REG_P (XEXP (op, 0))
8465 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8466 return false;
8468 /* When not strict, allow non-split TOC addresses and also allow
8469 (lo_sum (high ..)) TOC addresses created during reload. */
8470 if (GET_CODE (op) == LO_SUM)
8471 op = XEXP (op, 1);
8474 const_rtx tocrel_base = op;
8475 const_rtx tocrel_offset = const0_rtx;
8477 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8479 tocrel_base = XEXP (op, 0);
8480 tocrel_offset = XEXP (op, 1);
8483 if (tocrel_base_ret)
8484 *tocrel_base_ret = tocrel_base;
8485 if (tocrel_offset_ret)
8486 *tocrel_offset_ret = tocrel_offset;
8488 return (GET_CODE (tocrel_base) == UNSPEC
8489 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8492 /* Return true if X is a constant pool address, and also for cmodel=medium
8493 if X is a toc-relative address known to be offsettable within MODE. */
8495 bool
8496 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8497 bool strict)
8499 const_rtx tocrel_base, tocrel_offset;
8500 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8501 && (TARGET_CMODEL != CMODEL_MEDIUM
8502 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8503 || mode == QImode
8504 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8505 INTVAL (tocrel_offset), mode)));
8508 static bool
8509 legitimate_small_data_p (machine_mode mode, rtx x)
8511 return (DEFAULT_ABI == ABI_V4
8512 && !flag_pic && !TARGET_TOC
8513 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8514 && small_data_operand (x, mode));
8517 bool
8518 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8519 bool strict, bool worst_case)
8521 unsigned HOST_WIDE_INT offset;
8522 unsigned int extra;
8524 if (GET_CODE (x) != PLUS)
8525 return false;
8526 if (!REG_P (XEXP (x, 0)))
8527 return false;
8528 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8529 return false;
8530 if (mode_supports_vsx_dform_quad (mode))
8531 return quad_address_p (x, mode, strict);
8532 if (!reg_offset_addressing_ok_p (mode))
8533 return virtual_stack_registers_memory_p (x);
8534 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8535 return true;
8536 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8537 return false;
8539 offset = INTVAL (XEXP (x, 1));
8540 extra = 0;
8541 switch (mode)
8543 case E_V2SImode:
8544 case E_V2SFmode:
8545 /* Paired single modes: offset addressing isn't valid. */
8546 return false;
8548 case E_DFmode:
8549 case E_DDmode:
8550 case E_DImode:
8551 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8552 addressing. */
8553 if (VECTOR_MEM_VSX_P (mode))
8554 return false;
8556 if (!worst_case)
8557 break;
8558 if (!TARGET_POWERPC64)
8559 extra = 4;
8560 else if (offset & 3)
8561 return false;
8562 break;
8564 case E_TFmode:
8565 case E_IFmode:
8566 case E_KFmode:
8567 case E_TDmode:
8568 case E_TImode:
8569 case E_PTImode:
8570 extra = 8;
8571 if (!worst_case)
8572 break;
8573 if (!TARGET_POWERPC64)
8574 extra = 12;
8575 else if (offset & 3)
8576 return false;
8577 break;
8579 default:
8580 break;
8583 offset += 0x8000;
8584 return offset < 0x10000 - extra;
8587 bool
8588 legitimate_indexed_address_p (rtx x, int strict)
8590 rtx op0, op1;
8592 if (GET_CODE (x) != PLUS)
8593 return false;
8595 op0 = XEXP (x, 0);
8596 op1 = XEXP (x, 1);
8598 return (REG_P (op0) && REG_P (op1)
8599 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8600 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8601 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8602 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8605 bool
8606 avoiding_indexed_address_p (machine_mode mode)
8608 /* Avoid indexed addressing for modes that have non-indexed
8609 load/store instruction forms. */
8610 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8613 bool
8614 legitimate_indirect_address_p (rtx x, int strict)
8616 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8619 bool
8620 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8622 if (!TARGET_MACHO || !flag_pic
8623 || mode != SImode || GET_CODE (x) != MEM)
8624 return false;
8625 x = XEXP (x, 0);
8627 if (GET_CODE (x) != LO_SUM)
8628 return false;
8629 if (GET_CODE (XEXP (x, 0)) != REG)
8630 return false;
8631 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8632 return false;
8633 x = XEXP (x, 1);
8635 return CONSTANT_P (x);
8638 static bool
8639 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8641 if (GET_CODE (x) != LO_SUM)
8642 return false;
8643 if (GET_CODE (XEXP (x, 0)) != REG)
8644 return false;
8645 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8646 return false;
8647 /* quad word addresses are restricted, and we can't use LO_SUM. */
8648 if (mode_supports_vsx_dform_quad (mode))
8649 return false;
8650 x = XEXP (x, 1);
8652 if (TARGET_ELF || TARGET_MACHO)
8654 bool large_toc_ok;
8656 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8657 return false;
8658 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8659 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8660 recognizes some LO_SUM addresses as valid although this
8661 function says the opposite. In most cases LRA can, through
8662 different transformations, generate correct code for address
8663 reloads; only some LO_SUM cases cannot be managed that way. So
8664 we need code analogous to that in rs6000_legitimize_reload_address
8665 for LO_SUM here, saying that some addresses are still valid. */
8666 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8667 && small_toc_ref (x, VOIDmode));
8668 if (TARGET_TOC && ! large_toc_ok)
8669 return false;
8670 if (GET_MODE_NUNITS (mode) != 1)
8671 return false;
8672 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8673 && !(/* ??? Assume floating point reg based on mode? */
8674 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8675 && (mode == DFmode || mode == DDmode)))
8676 return false;
8678 return CONSTANT_P (x) || large_toc_ok;
8681 return false;
8685 /* Try machine-dependent ways of modifying an illegitimate address
8686 to be legitimate. If we find one, return the new, valid address.
8687 This is used from only one place: `memory_address' in explow.c.
8689 OLDX is the address as it was before break_out_memory_refs was
8690 called. In some cases it is useful to look at this to decide what
8691 needs to be done.
8693 It is always safe for this function to do nothing. It exists to
8694 recognize opportunities to optimize the output.
8696 On RS/6000, first check for the sum of a register with a constant
8697 integer that is out of range. If so, generate code to add the
8698 constant with the low-order 16 bits masked to the register and force
8699 this result into another register (this can be done with `cau').
8700 Then generate an address of REG+(CONST&0xffff), allowing for the
8701 possibility of bit 16 being a one.
8703 Then check for the sum of a register and something not constant; try to
8704 load the other operand into a register and return the sum. */
8706 static rtx
8707 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8708 machine_mode mode)
8710 unsigned int extra;
8712 if (!reg_offset_addressing_ok_p (mode)
8713 || mode_supports_vsx_dform_quad (mode))
8715 if (virtual_stack_registers_memory_p (x))
8716 return x;
8718 /* In theory we should not be seeing addresses of the form reg+0,
8719 but just in case it is generated, optimize it away. */
8720 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8721 return force_reg (Pmode, XEXP (x, 0));
8723 /* For TImode with load/store quad, restrict addresses to just a single
8724 pointer, so it works with both GPRs and VSX registers. */
8725 /* Make sure both operands are registers. */
8726 else if (GET_CODE (x) == PLUS
8727 && (mode != TImode || !TARGET_VSX))
8728 return gen_rtx_PLUS (Pmode,
8729 force_reg (Pmode, XEXP (x, 0)),
8730 force_reg (Pmode, XEXP (x, 1)));
8731 else
8732 return force_reg (Pmode, x);
8734 if (GET_CODE (x) == SYMBOL_REF)
8736 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8737 if (model != 0)
8738 return rs6000_legitimize_tls_address (x, model);
8741 extra = 0;
8742 switch (mode)
8744 case E_TFmode:
8745 case E_TDmode:
8746 case E_TImode:
8747 case E_PTImode:
8748 case E_IFmode:
8749 case E_KFmode:
8750 /* As in legitimate_offset_address_p we do not assume
8751 worst-case. The mode here is just a hint as to the registers
8752 used. A TImode is usually in gprs, but may actually be in
8753 fprs. Leave worst-case scenario for reload to handle via
8754 insn constraints. PTImode is only GPRs. */
8755 extra = 8;
8756 break;
8757 default:
8758 break;
8761 if (GET_CODE (x) == PLUS
8762 && GET_CODE (XEXP (x, 0)) == REG
8763 && GET_CODE (XEXP (x, 1)) == CONST_INT
8764 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8765 >= 0x10000 - extra)
8766 && !PAIRED_VECTOR_MODE (mode))
8768 HOST_WIDE_INT high_int, low_int;
8769 rtx sum;
8770 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8771 if (low_int >= 0x8000 - extra)
8772 low_int = 0;
8773 high_int = INTVAL (XEXP (x, 1)) - low_int;
8774 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8775 GEN_INT (high_int)), 0);
8776 return plus_constant (Pmode, sum, low_int);
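/* E.g. (a sketch): reg + 0x12345 splits into high_int = 0x10000 and
   low_int = 0x2345, so the high part is added first (an addis-style
   add) and 0x2345 remains as a valid 16-bit signed displacement.  */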
8778 else if (GET_CODE (x) == PLUS
8779 && GET_CODE (XEXP (x, 0)) == REG
8780 && GET_CODE (XEXP (x, 1)) != CONST_INT
8781 && GET_MODE_NUNITS (mode) == 1
8782 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8783 || (/* ??? Assume floating point reg based on mode? */
8784 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8785 && (mode == DFmode || mode == DDmode)))
8786 && !avoiding_indexed_address_p (mode))
8788 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8789 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8791 else if (PAIRED_VECTOR_MODE (mode))
8793 if (mode == DImode)
8794 return x;
8795 /* We accept [reg + reg]. */
8797 if (GET_CODE (x) == PLUS)
8799 rtx op1 = XEXP (x, 0);
8800 rtx op2 = XEXP (x, 1);
8801 rtx y;
8803 op1 = force_reg (Pmode, op1);
8804 op2 = force_reg (Pmode, op2);
8806 /* We can't always do [reg + reg] for these, because [reg +
8807 reg + offset] is not a legitimate addressing mode. */
8808 y = gen_rtx_PLUS (Pmode, op1, op2);
8810 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8811 return force_reg (Pmode, y);
8812 else
8813 return y;
8816 return force_reg (Pmode, x);
8818 else if ((TARGET_ELF
8819 #if TARGET_MACHO
8820 || !MACHO_DYNAMIC_NO_PIC_P
8821 #endif
8823 && TARGET_32BIT
8824 && TARGET_NO_TOC
8825 && ! flag_pic
8826 && GET_CODE (x) != CONST_INT
8827 && GET_CODE (x) != CONST_WIDE_INT
8828 && GET_CODE (x) != CONST_DOUBLE
8829 && CONSTANT_P (x)
8830 && GET_MODE_NUNITS (mode) == 1
8831 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8832 || (/* ??? Assume floating point reg based on mode? */
8833 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8834 && (mode == DFmode || mode == DDmode))))
8836 rtx reg = gen_reg_rtx (Pmode);
8837 if (TARGET_ELF)
8838 emit_insn (gen_elf_high (reg, x));
8839 else
8840 emit_insn (gen_macho_high (reg, x));
8841 return gen_rtx_LO_SUM (Pmode, reg, x);
8843 else if (TARGET_TOC
8844 && GET_CODE (x) == SYMBOL_REF
8845 && constant_pool_expr_p (x)
8846 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8847 return create_TOC_reference (x, NULL_RTX);
8848 else
8849 return x;
8852 /* Debug version of rs6000_legitimize_address. */
8853 static rtx
8854 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8856 rtx ret;
8857 rtx_insn *insns;
8859 start_sequence ();
8860 ret = rs6000_legitimize_address (x, oldx, mode);
8861 insns = get_insns ();
8862 end_sequence ();
8864 if (ret != x)
8866 fprintf (stderr,
8867 "\nrs6000_legitimize_address: mode %s, old code %s, "
8868 "new code %s, modified\n",
8869 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8870 GET_RTX_NAME (GET_CODE (ret)));
8872 fprintf (stderr, "Original address:\n");
8873 debug_rtx (x);
8875 fprintf (stderr, "oldx:\n");
8876 debug_rtx (oldx);
8878 fprintf (stderr, "New address:\n");
8879 debug_rtx (ret);
8881 if (insns)
8883 fprintf (stderr, "Insns added:\n");
8884 debug_rtx_list (insns, 20);
8887 else
8889 fprintf (stderr,
8890 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8891 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8893 debug_rtx (x);
8896 if (insns)
8897 emit_insn (insns);
8899 return ret;
8902 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8903 We need to emit DTP-relative relocations. */
8905 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8906 static void
8907 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8909 switch (size)
8911 case 4:
8912 fputs ("\t.long\t", file);
8913 break;
8914 case 8:
8915 fputs (DOUBLE_INT_ASM_OP, file);
8916 break;
8917 default:
8918 gcc_unreachable ();
8920 output_addr_const (file, x);
8921 if (TARGET_ELF)
8922 fputs ("@dtprel+0x8000", file);
8923 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8925 switch (SYMBOL_REF_TLS_MODEL (x))
8927 case 0:
8928 break;
8929 case TLS_MODEL_LOCAL_EXEC:
8930 fputs ("@le", file);
8931 break;
8932 case TLS_MODEL_INITIAL_EXEC:
8933 fputs ("@ie", file);
8934 break;
8935 case TLS_MODEL_GLOBAL_DYNAMIC:
8936 case TLS_MODEL_LOCAL_DYNAMIC:
8937 fputs ("@m", file);
8938 break;
8939 default:
8940 gcc_unreachable ();
8945 /* Return true if X is a symbol that refers to real (rather than emulated)
8946 TLS. */
8948 static bool
8949 rs6000_real_tls_symbol_ref_p (rtx x)
8951 return (GET_CODE (x) == SYMBOL_REF
8952 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8955 /* In the name of slightly smaller debug output, and to cater to
8956 general assembler lossage, recognize various UNSPEC sequences
8957 and turn them back into a direct symbol reference. */
8959 static rtx
8960 rs6000_delegitimize_address (rtx orig_x)
8962 rtx x, y, offset;
8964 orig_x = delegitimize_mem_from_attrs (orig_x);
8965 x = orig_x;
8966 if (MEM_P (x))
8967 x = XEXP (x, 0);
8969 y = x;
8970 if (TARGET_CMODEL != CMODEL_SMALL
8971 && GET_CODE (y) == LO_SUM)
8972 y = XEXP (y, 1);
8974 offset = NULL_RTX;
8975 if (GET_CODE (y) == PLUS
8976 && GET_MODE (y) == Pmode
8977 && CONST_INT_P (XEXP (y, 1)))
8979 offset = XEXP (y, 1);
8980 y = XEXP (y, 0);
8983 if (GET_CODE (y) == UNSPEC
8984 && XINT (y, 1) == UNSPEC_TOCREL)
8986 y = XVECEXP (y, 0, 0);
8988 #ifdef HAVE_AS_TLS
8989 /* Do not associate thread-local symbols with the original
8990 constant pool symbol. */
8991 if (TARGET_XCOFF
8992 && GET_CODE (y) == SYMBOL_REF
8993 && CONSTANT_POOL_ADDRESS_P (y)
8994 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8995 return orig_x;
8996 #endif
8998 if (offset != NULL_RTX)
8999 y = gen_rtx_PLUS (Pmode, y, offset);
9000 if (!MEM_P (orig_x))
9001 return y;
9002 else
9003 return replace_equiv_address_nv (orig_x, y);
9006 if (TARGET_MACHO
9007 && GET_CODE (orig_x) == LO_SUM
9008 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9010 y = XEXP (XEXP (orig_x, 1), 0);
9011 if (GET_CODE (y) == UNSPEC
9012 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9013 return XVECEXP (y, 0, 0);
9016 return orig_x;
9019 /* Return true if X shouldn't be emitted into the debug info.
9020 The linker doesn't like .toc section references from
9021 .debug_* sections, so reject .toc section symbols. */
9023 static bool
9024 rs6000_const_not_ok_for_debug_p (rtx x)
9026 if (GET_CODE (x) == UNSPEC)
9027 return true;
9028 if (GET_CODE (x) == SYMBOL_REF
9029 && CONSTANT_POOL_ADDRESS_P (x))
9031 rtx c = get_pool_constant (x);
9032 machine_mode cmode = get_pool_mode (x);
9033 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9034 return true;
9037 return false;
9041 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9043 static bool
9044 rs6000_legitimate_combined_insn (rtx_insn *insn)
9046 int icode = INSN_CODE (insn);
9048 /* Reject creating doloop insns. Combine should not be allowed
9049 to create these for a number of reasons:
9050 1) In a nested loop, if combine creates one of these in an
9051 outer loop and the register allocator happens to allocate ctr
9052 to the outer loop insn, then the inner loop can't use ctr.
9053 Inner loops ought to be more highly optimized.
9054 2) Combine often wants to create one of these from what was
9055 originally a three insn sequence, first combining the three
9056 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9057 allocated ctr, the splitter takes us back to the three insn
9058 sequence. It's better to stop combine at the two insn
9059 sequence.
9060 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9061 insns, the register allocator sometimes uses floating point
9062 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9063 jump insn and output reloads are not implemented for jumps,
9064 the ctrsi/ctrdi splitters need to handle all possible cases.
9065 That's a pain, and it gets to be seriously difficult when a
9066 splitter that runs after reload needs memory to transfer from
9067 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9068 for the difficult case. It's better to not create problems
9069 in the first place. */
9070 if (icode != CODE_FOR_nothing
9071 && (icode == CODE_FOR_bdz_si
9072 || icode == CODE_FOR_bdz_di
9073 || icode == CODE_FOR_bdnz_si
9074 || icode == CODE_FOR_bdnz_di
9075 || icode == CODE_FOR_bdztf_si
9076 || icode == CODE_FOR_bdztf_di
9077 || icode == CODE_FOR_bdnztf_si
9078 || icode == CODE_FOR_bdnztf_di))
9079 return false;
9081 return true;
9084 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9086 static GTY(()) rtx rs6000_tls_symbol;
9087 static rtx
9088 rs6000_tls_get_addr (void)
9090 if (!rs6000_tls_symbol)
9091 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9093 return rs6000_tls_symbol;
9096 /* Construct the SYMBOL_REF for TLS GOT references. */
9098 static GTY(()) rtx rs6000_got_symbol;
9099 static rtx
9100 rs6000_got_sym (void)
9102 if (!rs6000_got_symbol)
9104 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9105 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9106 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9109 return rs6000_got_symbol;
9112 /* AIX Thread-Local Address support. */
9114 static rtx
9115 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9117 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9118 const char *name;
9119 char *tlsname;
9121 name = XSTR (addr, 0);
9122 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9123 or will be placed in the TLS private data section. */
9124 if (name[strlen (name) - 1] != ']'
9125 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9126 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9128 tlsname = XALLOCAVEC (char, strlen (name) + 5); /* name + "[TL]" + NUL. */
9129 strcpy (tlsname, name);
9130 strcat (tlsname,
9131 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9132 tlsaddr = copy_rtx (addr);
9133 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9135 else
9136 tlsaddr = addr;
9138 /* Place addr into TOC constant pool. */
9139 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9141 /* Output the TOC entry and create the MEM referencing the value. */
9142 if (constant_pool_expr_p (XEXP (sym, 0))
9143 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9145 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9146 mem = gen_const_mem (Pmode, tocref);
9147 set_mem_alias_set (mem, get_TOC_alias_set ());
9149 else
9150 return sym;
9152 /* Use global-dynamic for local-dynamic. */
9153 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9154 || model == TLS_MODEL_LOCAL_DYNAMIC)
9156 /* Create new TOC reference for @m symbol. */
9157 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9158 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9159 strcpy (tlsname, "*LCM");
9160 strcat (tlsname, name + 3);
9161 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9162 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9163 tocref = create_TOC_reference (modaddr, NULL_RTX);
9164 rtx modmem = gen_const_mem (Pmode, tocref);
9165 set_mem_alias_set (modmem, get_TOC_alias_set ());
9167 rtx modreg = gen_reg_rtx (Pmode);
9168 emit_insn (gen_rtx_SET (modreg, modmem));
9170 tmpreg = gen_reg_rtx (Pmode);
9171 emit_insn (gen_rtx_SET (tmpreg, mem));
9173 dest = gen_reg_rtx (Pmode);
9174 if (TARGET_32BIT)
9175 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9176 else
9177 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9178 return dest;
9180 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9181 else if (TARGET_32BIT)
9183 tlsreg = gen_reg_rtx (SImode);
9184 emit_insn (gen_tls_get_tpointer (tlsreg));
9186 else
9187 tlsreg = gen_rtx_REG (DImode, 13);
9189 /* Load the TOC value into temporary register. */
9190 tmpreg = gen_reg_rtx (Pmode);
9191 emit_insn (gen_rtx_SET (tmpreg, mem));
9192 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9193 gen_rtx_MINUS (Pmode, addr, tlsreg));
9195 /* Add TOC symbol value to TLS pointer. */
9196 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9198 return dest;
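/* Editorial example (an assumption for illustration): given

     __thread int counter;

   the code above rewrites the reference "counter" to "counter[TL]"
   (initialized or public TLS data) or "counter[UL]" (uninitialized),
   places the qualified symbol in the TOC, and adds the loaded TOC
   value to the thread pointer to form the final address.  */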
9201 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9202 this (thread-local) address. */
9204 static rtx
9205 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9207 rtx dest, insn;
9209 if (TARGET_XCOFF)
9210 return rs6000_legitimize_tls_address_aix (addr, model);
9212 dest = gen_reg_rtx (Pmode);
9213 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9215 rtx tlsreg;
9217 if (TARGET_64BIT)
9219 tlsreg = gen_rtx_REG (Pmode, 13);
9220 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9222 else
9224 tlsreg = gen_rtx_REG (Pmode, 2);
9225 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9227 emit_insn (insn);
9229 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9231 rtx tlsreg, tmp;
9233 tmp = gen_reg_rtx (Pmode);
9234 if (TARGET_64BIT)
9236 tlsreg = gen_rtx_REG (Pmode, 13);
9237 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9239 else
9241 tlsreg = gen_rtx_REG (Pmode, 2);
9242 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9244 emit_insn (insn);
9245 if (TARGET_64BIT)
9246 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9247 else
9248 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9249 emit_insn (insn);
9251 else
9253 rtx r3, got, tga, tmp1, tmp2, call_insn;
9255 /* We currently use relocations like @got@tlsgd for tls, which
9256 means the linker will handle allocation of tls entries, placing
9257 them in the .got section. So use a pointer to the .got section,
9258 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9259 or to secondary GOT sections used by 32-bit -fPIC. */
9260 if (TARGET_64BIT)
9261 got = gen_rtx_REG (Pmode, 2);
9262 else
9264 if (flag_pic == 1)
9265 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9266 else
9268 rtx gsym = rs6000_got_sym ();
9269 got = gen_reg_rtx (Pmode);
9270 if (flag_pic == 0)
9271 rs6000_emit_move (got, gsym, Pmode);
9272 else
9274 rtx mem, lab;
9276 tmp1 = gen_reg_rtx (Pmode);
9277 tmp2 = gen_reg_rtx (Pmode);
9278 mem = gen_const_mem (Pmode, tmp1);
9279 lab = gen_label_rtx ();
9280 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9281 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9282 if (TARGET_LINK_STACK)
9283 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9284 emit_move_insn (tmp2, mem);
9285 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9286 set_unique_reg_note (last, REG_EQUAL, gsym);
9291 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9293 tga = rs6000_tls_get_addr ();
9294 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9295 const0_rtx, Pmode);
9297 r3 = gen_rtx_REG (Pmode, 3);
9298 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9300 if (TARGET_64BIT)
9301 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9302 else
9303 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9305 else if (DEFAULT_ABI == ABI_V4)
9306 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9307 else
9308 gcc_unreachable ();
9309 call_insn = last_call_insn ();
9310 PATTERN (call_insn) = insn;
9311 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9312 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9313 pic_offset_table_rtx);
9315 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9317 tga = rs6000_tls_get_addr ();
9318 tmp1 = gen_reg_rtx (Pmode);
9319 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9320 const0_rtx, Pmode);
9322 r3 = gen_rtx_REG (Pmode, 3);
9323 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9325 if (TARGET_64BIT)
9326 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9327 else
9328 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9330 else if (DEFAULT_ABI == ABI_V4)
9331 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9332 else
9333 gcc_unreachable ();
9334 call_insn = last_call_insn ();
9335 PATTERN (call_insn) = insn;
9336 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9337 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9338 pic_offset_table_rtx);
9340 if (rs6000_tls_size == 16)
9342 if (TARGET_64BIT)
9343 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9344 else
9345 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9347 else if (rs6000_tls_size == 32)
9349 tmp2 = gen_reg_rtx (Pmode);
9350 if (TARGET_64BIT)
9351 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9352 else
9353 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9354 emit_insn (insn);
9355 if (TARGET_64BIT)
9356 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9357 else
9358 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9360 else
9362 tmp2 = gen_reg_rtx (Pmode);
9363 if (TARGET_64BIT)
9364 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9365 else
9366 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9367 emit_insn (insn);
9368 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9370 emit_insn (insn);
9372 else
9374 /* IE, or 64-bit offset LE. */
9375 tmp2 = gen_reg_rtx (Pmode);
9376 if (TARGET_64BIT)
9377 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9378 else
9379 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9380 emit_insn (insn);
9381 if (TARGET_64BIT)
9382 insn = gen_tls_tls_64 (dest, tmp2, addr);
9383 else
9384 insn = gen_tls_tls_32 (dest, tmp2, addr);
9385 emit_insn (insn);
9389 return dest;
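/* Editorial sketch (illustrative): which branch above is taken
   follows from the TLS access model of the symbol, e.g.

     __thread int t_ext;          // typically global-dynamic with -fPIC
     static __thread int t_loc;   // local-dynamic or local-exec

   The dynamic models call __tls_get_addr; initial-exec and
   local-exec add a tprel offset to the thread pointer (GPR 13 in
   64-bit mode, GPR 2 in 32-bit mode).  */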
9392 /* Only create the global variable for the stack protect guard if we are using
9393 the global flavor of that guard. */
9394 static tree
9395 rs6000_init_stack_protect_guard (void)
9397 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9398 return default_stack_protect_guard ();
9400 return NULL_TREE;
9403 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9405 static bool
9406 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9408 if (GET_CODE (x) == HIGH
9409 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9410 return true;
9412 /* A TLS symbol in the TOC cannot contain a sum. */
9413 if (GET_CODE (x) == CONST
9414 && GET_CODE (XEXP (x, 0)) == PLUS
9415 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9416 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9417 return true;
9419 /* Do not place an ELF TLS symbol in the constant pool. */
9420 return TARGET_ELF && tls_referenced_p (x);
9423 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9424 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9425 can be addressed relative to the toc pointer. */
9427 static bool
9428 use_toc_relative_ref (rtx sym, machine_mode mode)
9430 return ((constant_pool_expr_p (sym)
9431 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9432 get_pool_mode (sym)))
9433 || (TARGET_CMODEL == CMODEL_MEDIUM
9434 && SYMBOL_REF_LOCAL_P (sym)
9435 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9438 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9439 replace the input X, or the original X if no replacement is called for.
9440 The output parameter *WIN is 1 if the calling macro should goto WIN,
9441 0 if it should not.
9443 For RS/6000, we wish to handle large displacements off a base
9444 register by splitting the addend across an addis and the mem insn.
9445 This cuts the number of extra insns needed from 3 to 1.
9447 On Darwin, we use this to generate code for floating point constants.
9448 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9449 The Darwin code is inside #if TARGET_MACHO because only then are the
9450 machopic_* functions defined. */
9451 static rtx
9452 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9453 int opnum, int type,
9454 int ind_levels ATTRIBUTE_UNUSED, int *win)
9456 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9457 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9459 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9460 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9461 if (reg_offset_p
9462 && opnum == 1
9463 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9464 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9465 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9466 && TARGET_P9_VECTOR)
9467 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9468 && TARGET_P9_VECTOR)))
9469 reg_offset_p = false;
9471 /* We must recognize output that we have already generated ourselves. */
9472 if (GET_CODE (x) == PLUS
9473 && GET_CODE (XEXP (x, 0)) == PLUS
9474 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9475 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9476 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9478 if (TARGET_DEBUG_ADDR)
9480 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9481 debug_rtx (x);
9483 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9484 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9485 opnum, (enum reload_type) type);
9486 *win = 1;
9487 return x;
9490 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9491 if (GET_CODE (x) == LO_SUM
9492 && GET_CODE (XEXP (x, 0)) == HIGH)
9494 if (TARGET_DEBUG_ADDR)
9496 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9497 debug_rtx (x);
9499 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9500 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9501 opnum, (enum reload_type) type);
9502 *win = 1;
9503 return x;
9506 #if TARGET_MACHO
9507 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9508 && GET_CODE (x) == LO_SUM
9509 && GET_CODE (XEXP (x, 0)) == PLUS
9510 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9511 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9512 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9513 && machopic_operand_p (XEXP (x, 1)))
9515 /* Result of previous invocation of this function on Darwin
9516 floating point constant. */
9517 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9518 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9519 opnum, (enum reload_type) type);
9520 *win = 1;
9521 return x;
9523 #endif
9525 if (TARGET_CMODEL != CMODEL_SMALL
9526 && reg_offset_p
9527 && !quad_offset_p
9528 && small_toc_ref (x, VOIDmode))
9530 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9531 x = gen_rtx_LO_SUM (Pmode, hi, x);
9532 if (TARGET_DEBUG_ADDR)
9534 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9535 debug_rtx (x);
9537 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9538 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9539 opnum, (enum reload_type) type);
9540 *win = 1;
9541 return x;
9544 if (GET_CODE (x) == PLUS
9545 && REG_P (XEXP (x, 0))
9546 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9547 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9548 && CONST_INT_P (XEXP (x, 1))
9549 && reg_offset_p
9550 && !PAIRED_VECTOR_MODE (mode)
9551 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9553 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9554 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9555 HOST_WIDE_INT high
9556 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
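/* Editorial worked example: for val = 0x12348000 the lines above
   give low = -0x8000 and high = 0x12350000, so high + low == val;
   the high part becomes an addis (addis reg,base,0x1235) and the
   mem insn keeps the 16-bit signed displacement -0x8000(reg).  */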
9558 /* Check for 32-bit overflow or quad addresses with one of the
9559 four least significant bits set. */
9560 if (high + low != val
9561 || (quad_offset_p && (low & 0xf)))
9563 *win = 0;
9564 return x;
9567 /* Reload the high part into a base reg; leave the low part
9568 in the mem directly. */
9570 x = gen_rtx_PLUS (GET_MODE (x),
9571 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9572 GEN_INT (high)),
9573 GEN_INT (low));
9575 if (TARGET_DEBUG_ADDR)
9577 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9578 debug_rtx (x);
9580 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9581 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9582 opnum, (enum reload_type) type);
9583 *win = 1;
9584 return x;
9587 if (GET_CODE (x) == SYMBOL_REF
9588 && reg_offset_p
9589 && !quad_offset_p
9590 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9591 && !PAIRED_VECTOR_MODE (mode)
9592 #if TARGET_MACHO
9593 && DEFAULT_ABI == ABI_DARWIN
9594 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9595 && machopic_symbol_defined_p (x)
9596 #else
9597 && DEFAULT_ABI == ABI_V4
9598 && !flag_pic
9599 #endif
9600 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9601 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9602 without fprs.
9603 ??? Assume floating point reg based on mode? This assumption is
9604 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9605 where reload ends up doing a DFmode load of a constant from
9606 mem using two gprs. Unfortunately, at this point reload
9607 hasn't yet selected regs so poking around in reload data
9608 won't help and even if we could figure out the regs reliably,
9609 we'd still want to allow this transformation when the mem is
9610 naturally aligned. Since we say the address is good here, we
9611 can't disable offsets from LO_SUMs in mem_operand_gpr.
9612 FIXME: Allow offset from lo_sum for other modes too, when
9613 mem is sufficiently aligned.
9615 Also disallow this if the type can go in VMX/Altivec registers, since
9616 those registers do not have d-form (reg+offset) address modes. */
9617 && !reg_addr[mode].scalar_in_vmx_p
9618 && mode != TFmode
9619 && mode != TDmode
9620 && mode != IFmode
9621 && mode != KFmode
9622 && (mode != TImode || !TARGET_VSX)
9623 && mode != PTImode
9624 && (mode != DImode || TARGET_POWERPC64)
9625 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9626 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9628 #if TARGET_MACHO
9629 if (flag_pic)
9631 rtx offset = machopic_gen_offset (x);
9632 x = gen_rtx_LO_SUM (GET_MODE (x),
9633 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9634 gen_rtx_HIGH (Pmode, offset)), offset);
9636 else
9637 #endif
9638 x = gen_rtx_LO_SUM (GET_MODE (x),
9639 gen_rtx_HIGH (Pmode, x), x);
9641 if (TARGET_DEBUG_ADDR)
9643 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9644 debug_rtx (x);
9646 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9647 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9648 opnum, (enum reload_type) type);
9649 *win = 1;
9650 return x;
9653 /* Reload an offset address wrapped by an AND that represents the
9654 masking of the lower bits. Strip the outer AND and let reload
9655 convert the offset address into an indirect address. For VSX,
9656 force reload to create the address with an AND in a separate
9657 register, because we can't guarantee an altivec register will
9658 be used. */
9659 if (VECTOR_MEM_ALTIVEC_P (mode)
9660 && GET_CODE (x) == AND
9661 && GET_CODE (XEXP (x, 0)) == PLUS
9662 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9663 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9664 && GET_CODE (XEXP (x, 1)) == CONST_INT
9665 && INTVAL (XEXP (x, 1)) == -16)
9667 x = XEXP (x, 0);
9668 *win = 1;
9669 return x;
9672 if (TARGET_TOC
9673 && reg_offset_p
9674 && !quad_offset_p
9675 && GET_CODE (x) == SYMBOL_REF
9676 && use_toc_relative_ref (x, mode))
9678 x = create_TOC_reference (x, NULL_RTX);
9679 if (TARGET_CMODEL != CMODEL_SMALL)
9681 if (TARGET_DEBUG_ADDR)
9683 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9684 debug_rtx (x);
9686 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9687 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9688 opnum, (enum reload_type) type);
9690 *win = 1;
9691 return x;
9693 *win = 0;
9694 return x;
9697 /* Debug version of rs6000_legitimize_reload_address. */
9698 static rtx
9699 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9700 int opnum, int type,
9701 int ind_levels, int *win)
9703 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9704 ind_levels, win);
9705 fprintf (stderr,
9706 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9707 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9708 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9709 debug_rtx (x);
9711 if (x == ret)
9712 fprintf (stderr, "Same address returned\n");
9713 else if (!ret)
9714 fprintf (stderr, "NULL returned\n");
9715 else
9717 fprintf (stderr, "New address:\n");
9718 debug_rtx (ret);
9721 return ret;
9724 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9725 that is a valid memory address for an instruction.
9726 The MODE argument is the machine mode for the MEM expression
9727 that wants to use this address.
9729 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9730 refers to a constant pool entry of an address (or the sum of it
9731 plus a constant), a short (16-bit signed) constant plus a register,
9732 the sum of two registers, or a register indirect, possibly with an
9733 auto-increment. For DFmode, DDmode and DImode with a constant plus
9734 register, we must ensure that both words are addressable or PowerPC64
9735 with offset word aligned.
9737 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9738 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9739 because adjacent memory cells are accessed by adding word-sized offsets
9740 during assembly output. */
9741 static bool
9742 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9744 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9745 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9747 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9748 if (VECTOR_MEM_ALTIVEC_P (mode)
9749 && GET_CODE (x) == AND
9750 && GET_CODE (XEXP (x, 1)) == CONST_INT
9751 && INTVAL (XEXP (x, 1)) == -16)
9752 x = XEXP (x, 0);
9754 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9755 return 0;
9756 if (legitimate_indirect_address_p (x, reg_ok_strict))
9757 return 1;
9758 if (TARGET_UPDATE
9759 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9760 && mode_supports_pre_incdec_p (mode)
9761 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9762 return 1;
9763 /* Handle restricted vector d-form offsets in ISA 3.0. */
9764 if (quad_offset_p)
9766 if (quad_address_p (x, mode, reg_ok_strict))
9767 return 1;
9769 else if (virtual_stack_registers_memory_p (x))
9770 return 1;
9772 else if (reg_offset_p)
9774 if (legitimate_small_data_p (mode, x))
9775 return 1;
9776 if (legitimate_constant_pool_address_p (x, mode,
9777 reg_ok_strict || lra_in_progress))
9778 return 1;
9779 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9780 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9781 return 1;
9784 /* For TImode, if we have TImode in VSX registers, only allow register
9785 indirect addresses. This will allow the values to go in either GPRs
9786 or VSX registers without reloading. The vector types would tend to
9787 go into VSX registers, so we allow REG+REG, while TImode seems
9788 somewhat split, in that some uses are GPR based, and some VSX based. */
9789 /* FIXME: We could loosen this by changing the following to
9790 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9791 but currently we cannot allow REG+REG addressing for TImode. See
9792 PR72827 for complete details on how this ends up hoodwinking DSE. */
9793 if (mode == TImode && TARGET_VSX)
9794 return 0;
9795 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9796 if (! reg_ok_strict
9797 && reg_offset_p
9798 && GET_CODE (x) == PLUS
9799 && GET_CODE (XEXP (x, 0)) == REG
9800 && (XEXP (x, 0) == virtual_stack_vars_rtx
9801 || XEXP (x, 0) == arg_pointer_rtx)
9802 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9803 return 1;
9804 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9805 return 1;
9806 if (!FLOAT128_2REG_P (mode)
9807 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9808 || TARGET_POWERPC64
9809 || (mode != DFmode && mode != DDmode))
9810 && (TARGET_POWERPC64 || mode != DImode)
9811 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9812 && mode != PTImode
9813 && !avoiding_indexed_address_p (mode)
9814 && legitimate_indexed_address_p (x, reg_ok_strict))
9815 return 1;
9816 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9817 && mode_supports_pre_modify_p (mode)
9818 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9819 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9820 reg_ok_strict, false)
9821 || (!avoiding_indexed_address_p (mode)
9822 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9823 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9824 return 1;
9825 if (reg_offset_p && !quad_offset_p
9826 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9827 return 1;
9828 return 0;
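/* Editorial summary (illustrative): the forms accepted above
   correspond to RTL such as
     (reg r9)                                register indirect
     (plus (reg r9) (const_int N))           d-form, -0x8000 <= N <= 0x7fff
     (plus (reg r9) (reg r10))               x-form (indexed)
     (pre_inc (reg r9)), (pre_modify ...)    update forms
     (lo_sum (reg r9) (symbol_ref ...))      high/lo_sum pairs
   subject to the per-mode restrictions checked above.  */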
9831 /* Debug version of rs6000_legitimate_address_p. */
9832 static bool
9833 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9834 bool reg_ok_strict)
9836 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9837 fprintf (stderr,
9838 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9839 "strict = %d, reload = %s, code = %s\n",
9840 ret ? "true" : "false",
9841 GET_MODE_NAME (mode),
9842 reg_ok_strict,
9843 (reload_completed ? "after" : "before"),
9844 GET_RTX_NAME (GET_CODE (x)));
9845 debug_rtx (x);
9847 return ret;
9850 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9852 static bool
9853 rs6000_mode_dependent_address_p (const_rtx addr,
9854 addr_space_t as ATTRIBUTE_UNUSED)
9856 return rs6000_mode_dependent_address_ptr (addr);
9859 /* Go to LABEL if ADDR (a legitimate address expression)
9860 has an effect that depends on the machine mode it is used for.
9862 On the RS/6000 this is true of all integral offsets (since AltiVec
9863 and VSX modes don't allow them) or is a pre-increment or decrement.
9865 ??? Except that due to conceptual problems in offsettable_address_p
9866 we can't really report the problems of integral offsets. So leave
9867 this assuming that the adjustable offset must be valid for the
9868 sub-words of a TFmode operand, which is what we had before. */
9870 static bool
9871 rs6000_mode_dependent_address (const_rtx addr)
9873 switch (GET_CODE (addr))
9875 case PLUS:
9876 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9877 is considered a legitimate address before reload, so there
9878 are no offset restrictions in that case. Note that this
9879 condition is safe in strict mode because any address involving
9880 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9881 been rejected as illegitimate. */
9882 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9883 && XEXP (addr, 0) != arg_pointer_rtx
9884 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9886 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9887 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9889 break;
9891 case LO_SUM:
9892 /* Anything in the constant pool is sufficiently aligned that
9893 all bytes have the same high part address. */
9894 return !legitimate_constant_pool_address_p (addr, QImode, false);
9896 /* Auto-increment cases are now treated generically in recog.c. */
9897 case PRE_MODIFY:
9898 return TARGET_UPDATE;
9900 /* AND is only allowed in Altivec loads. */
9901 case AND:
9902 return true;
9904 default:
9905 break;
9908 return false;
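/* Editorial worked example: in 64-bit mode the PLUS case flags
   val + 0x8000 >= 0x10000 - 8, i.e. val >= 0x7ff8; a 16-byte access
   at offset 0x7ff8 needs its second doubleword at 0x8000, which no
   longer fits a 16-bit signed displacement.  In 32-bit mode the
   slack is 12 bytes, the offset of the last word of a TFmode
   access.  */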
9911 /* Debug version of rs6000_mode_dependent_address. */
9912 static bool
9913 rs6000_debug_mode_dependent_address (const_rtx addr)
9915 bool ret = rs6000_mode_dependent_address (addr);
9917 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9918 ret ? "true" : "false");
9919 debug_rtx (addr);
9921 return ret;
9924 /* Implement FIND_BASE_TERM. */
9927 rs6000_find_base_term (rtx op)
9929 rtx base;
9931 base = op;
9932 if (GET_CODE (base) == CONST)
9933 base = XEXP (base, 0);
9934 if (GET_CODE (base) == PLUS)
9935 base = XEXP (base, 0);
9936 if (GET_CODE (base) == UNSPEC)
9937 switch (XINT (base, 1))
9939 case UNSPEC_TOCREL:
9940 case UNSPEC_MACHOPIC_OFFSET:
9941 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9942 for aliasing purposes. */
9943 return XVECEXP (base, 0, 0);
9946 return op;
9949 /* More elaborate version of recog's offsettable_memref_p predicate
9950 that works around the ??? note of rs6000_mode_dependent_address.
9951 In particular it accepts
9953 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9955 in 32-bit mode, that the recog predicate rejects. */
9957 static bool
9958 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9960 bool worst_case;
9962 if (!MEM_P (op))
9963 return false;
9965 /* First mimic offsettable_memref_p. */
9966 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9967 return true;
9969 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9970 the latter predicate knows nothing about the mode of the memory
9971 reference and, therefore, assumes that it is the largest supported
9972 mode (TFmode). As a consequence, legitimate offsettable memory
9973 references are rejected. rs6000_legitimate_offset_address_p contains
9974 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9975 at least with a little bit of help here given that we know the
9976 actual registers used. */
9977 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9978 || GET_MODE_SIZE (reg_mode) == 4);
9979 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9980 strict, worst_case);
9983 /* Determine the reassociation width to be used in reassociate_bb.
9984 This takes into account how many parallel operations we
9985 can actually do of a given type, and also the latency.
9987 int add/sub 6/cycle
9988 mul 2/cycle
9989 vect add/sub/mul 2/cycle
9990 fp add/sub/mul 2/cycle
9991 dfp 1/cycle
9994 static int
9995 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9996 machine_mode mode)
9998 switch (rs6000_tune)
10000 case PROCESSOR_POWER8:
10001 case PROCESSOR_POWER9:
10002 if (DECIMAL_FLOAT_MODE_P (mode))
10003 return 1;
10004 if (VECTOR_MODE_P (mode))
10005 return 4;
10006 if (INTEGRAL_MODE_P (mode))
10007 return 1;
10008 if (FLOAT_MODE_P (mode))
10009 return 4;
10010 break;
10011 default:
10012 break;
10014 return 1;
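/* Editorial example: a width of 4 lets reassociate_bb rebalance a
   serial chain such as

     double s = a + b + c + d + e + f + g + h;

   into ((a + b) + (c + d)) + ((e + f) + (g + h)), exposing
   independent FP adds instead of one dependent chain.  */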
10017 /* Change register usage conditional on target flags. */
10018 static void
10019 rs6000_conditional_register_usage (void)
10021 int i;
10023 if (TARGET_DEBUG_TARGET)
10024 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10026 /* Set MQ register fixed (already call_used) so that it will not be
10027 allocated. */
10028 fixed_regs[64] = 1;
10030 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10031 if (TARGET_64BIT)
10032 fixed_regs[13] = call_used_regs[13]
10033 = call_really_used_regs[13] = 1;
10035 /* Conditionally disable FPRs. */
10036 if (TARGET_SOFT_FLOAT)
10037 for (i = 32; i < 64; i++)
10038 fixed_regs[i] = call_used_regs[i]
10039 = call_really_used_regs[i] = 1;
10041 /* The TOC register is not killed across calls in a way that is
10042 visible to the compiler. */
10043 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10044 call_really_used_regs[2] = 0;
10046 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10047 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10049 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10050 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10051 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10052 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10054 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10055 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10056 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10057 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10059 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10060 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10061 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10063 if (!TARGET_ALTIVEC && !TARGET_VSX)
10065 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10066 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10067 call_really_used_regs[VRSAVE_REGNO] = 1;
10070 if (TARGET_ALTIVEC || TARGET_VSX)
10071 global_regs[VSCR_REGNO] = 1;
10073 if (TARGET_ALTIVEC_ABI)
10075 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10076 call_used_regs[i] = call_really_used_regs[i] = 1;
10078 /* AIX reserves VR20:31 in non-extended ABI mode. */
10079 if (TARGET_XCOFF)
10080 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10081 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10086 /* Output insns to set DEST equal to the constant SOURCE as a series of
10087 lis, ori and shl instructions and return TRUE. */
10089 bool
10090 rs6000_emit_set_const (rtx dest, rtx source)
10092 machine_mode mode = GET_MODE (dest);
10093 rtx temp, set;
10094 rtx_insn *insn;
10095 HOST_WIDE_INT c;
10097 gcc_checking_assert (CONST_INT_P (source));
10098 c = INTVAL (source);
10099 switch (mode)
10101 case E_QImode:
10102 case E_HImode:
10103 emit_insn (gen_rtx_SET (dest, source));
10104 return true;
10106 case E_SImode:
10107 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10109 emit_insn (gen_rtx_SET (copy_rtx (temp),
10110 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10111 emit_insn (gen_rtx_SET (dest,
10112 gen_rtx_IOR (SImode, copy_rtx (temp),
10113 GEN_INT (c & 0xffff))));
10114 break;
10116 case E_DImode:
10117 if (!TARGET_POWERPC64)
10119 rtx hi, lo;
10121 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10122 DImode);
10123 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10124 DImode);
10125 emit_move_insn (hi, GEN_INT (c >> 32));
10126 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10127 emit_move_insn (lo, GEN_INT (c));
10129 else
10130 rs6000_emit_set_long_const (dest, c);
10131 break;
10133 default:
10134 gcc_unreachable ();
10137 insn = get_last_insn ();
10138 set = single_set (insn);
10139 if (! CONSTANT_P (SET_SRC (set)))
10140 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10142 return true;
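/* Editorial worked example: for SImode c = 0x12345678 the sequence
   above is equivalent to

     lis r3,0x1234        # r3 = c & ~0xffff
     ori r3,r3,0x5678     # r3 |= c & 0xffff

   two insns for any 32-bit constant that is not a 16-bit
   immediate.  */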
10145 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10146 Output insns to set DEST equal to the constant C as a series of
10147 lis, ori and shl instructions. */
10149 static void
10150 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10152 rtx temp;
10153 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10155 ud1 = c & 0xffff;
10156 c = c >> 16;
10157 ud2 = c & 0xffff;
10158 c = c >> 16;
10159 ud3 = c & 0xffff;
10160 c = c >> 16;
10161 ud4 = c & 0xffff;
10163 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10164 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10165 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10167 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10168 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10170 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10172 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10173 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10174 if (ud1 != 0)
10175 emit_move_insn (dest,
10176 gen_rtx_IOR (DImode, copy_rtx (temp),
10177 GEN_INT (ud1)));
10179 else if (ud3 == 0 && ud4 == 0)
10181 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10183 gcc_assert (ud2 & 0x8000);
10184 emit_move_insn (copy_rtx (temp),
10185 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10186 if (ud1 != 0)
10187 emit_move_insn (copy_rtx (temp),
10188 gen_rtx_IOR (DImode, copy_rtx (temp),
10189 GEN_INT (ud1)));
10190 emit_move_insn (dest,
10191 gen_rtx_ZERO_EXTEND (DImode,
10192 gen_lowpart (SImode,
10193 copy_rtx (temp))));
10195 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10196 || (ud4 == 0 && ! (ud3 & 0x8000)))
10198 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10200 emit_move_insn (copy_rtx (temp),
10201 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10202 if (ud2 != 0)
10203 emit_move_insn (copy_rtx (temp),
10204 gen_rtx_IOR (DImode, copy_rtx (temp),
10205 GEN_INT (ud2)));
10206 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10207 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10208 GEN_INT (16)));
10209 if (ud1 != 0)
10210 emit_move_insn (dest,
10211 gen_rtx_IOR (DImode, copy_rtx (temp),
10212 GEN_INT (ud1)));
10214 else
10216 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10218 emit_move_insn (copy_rtx (temp),
10219 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10220 if (ud3 != 0)
10221 emit_move_insn (copy_rtx (temp),
10222 gen_rtx_IOR (DImode, copy_rtx (temp),
10223 GEN_INT (ud3)));
10225 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10226 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10227 GEN_INT (32)));
10228 if (ud2 != 0)
10229 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10230 gen_rtx_IOR (DImode, copy_rtx (temp),
10231 GEN_INT (ud2 << 16)));
10232 if (ud1 != 0)
10233 emit_move_insn (dest,
10234 gen_rtx_IOR (DImode, copy_rtx (temp),
10235 GEN_INT (ud1)));
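/* Editorial worked example: c = 0x123456789abcdef0 splits into
   ud4:ud3:ud2:ud1 = 0x1234:0x5678:0x9abc:0xdef0 and takes the
   final branch above, the five-insn worst case:

     lis   r3,0x1234      # ud4
     ori   r3,r3,0x5678   # or in ud3
     sldi  r3,r3,32       # move to the high half
     oris  r3,r3,0x9abc   # or in ud2 << 16
     ori   r3,r3,0xdef0   # or in ud1

   Any 64-bit constant needs at most these five insns.  */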
10239 /* Helper for the following. Get rid of [r+r] memory refs
10240 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10242 static void
10243 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10245 if (GET_CODE (operands[0]) == MEM
10246 && GET_CODE (XEXP (operands[0], 0)) != REG
10247 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10248 GET_MODE (operands[0]), false))
10249 operands[0]
10250 = replace_equiv_address (operands[0],
10251 copy_addr_to_reg (XEXP (operands[0], 0)));
10253 if (GET_CODE (operands[1]) == MEM
10254 && GET_CODE (XEXP (operands[1], 0)) != REG
10255 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10256 GET_MODE (operands[1]), false))
10257 operands[1]
10258 = replace_equiv_address (operands[1],
10259 copy_addr_to_reg (XEXP (operands[1], 0)));
10262 /* Generate a vector of constants to permute MODE for a little-endian
10263 storage operation by swapping the two halves of a vector. */
10264 static rtvec
10265 rs6000_const_vec (machine_mode mode)
10267 int i, subparts;
10268 rtvec v;
10270 switch (mode)
10272 case E_V1TImode:
10273 subparts = 1;
10274 break;
10275 case E_V2DFmode:
10276 case E_V2DImode:
10277 subparts = 2;
10278 break;
10279 case E_V4SFmode:
10280 case E_V4SImode:
10281 subparts = 4;
10282 break;
10283 case E_V8HImode:
10284 subparts = 8;
10285 break;
10286 case E_V16QImode:
10287 subparts = 16;
10288 break;
10289 default:
10290 gcc_unreachable();
10293 v = rtvec_alloc (subparts);
10295 for (i = 0; i < subparts / 2; ++i)
10296 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10297 for (i = subparts / 2; i < subparts; ++i)
10298 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10300 return v;
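/* Editorial example: for V4SImode the vector built above is
   { 2, 3, 0, 1 }, i.e. a selection that swaps the two 64-bit halves,
   matching the doubleword swap that lxvd2x/stxvd2x perform on
   little-endian.  */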
10303 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10304 store operation. */
10305 void
10306 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10308 /* Scalar permutations are easier to express in integer modes than in
10309 floating-point modes, so cast them here. We use V1TImode instead
10310 of TImode to ensure that the values don't go through GPRs. */
10311 if (FLOAT128_VECTOR_P (mode))
10313 dest = gen_lowpart (V1TImode, dest);
10314 source = gen_lowpart (V1TImode, source);
10315 mode = V1TImode;
10318 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10319 scalar. */
10320 if (mode == TImode || mode == V1TImode)
10321 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10322 GEN_INT (64))));
10323 else
10325 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10326 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10330 /* Emit a little-endian load from vector memory location SOURCE to VSX
10331 register DEST in mode MODE. The load is done with two permuting
10332 insns that represent an lxvd2x and an xxpermdi.
10333 void
10334 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10336 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10337 V1TImode). */
10338 if (mode == TImode || mode == V1TImode)
10340 mode = V2DImode;
10341 dest = gen_lowpart (V2DImode, dest);
10342 source = adjust_address (source, V2DImode, 0);
10345 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10346 rs6000_emit_le_vsx_permute (tmp, source, mode);
10347 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10350 /* Emit a little-endian store to vector memory location DEST from VSX
10351 register SOURCE in mode MODE. The store is done with two permuting
10352 insns that represent an xxpermdi and an stxvd2x.
10353 void
10354 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10356 /* This should never be called during or after LRA, because it does
10357 not re-permute the source register. It is intended only for use
10358 during expand. */
10359 gcc_assert (!lra_in_progress && !reload_completed);
10361 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10362 V1TImode). */
10363 if (mode == TImode || mode == V1TImode)
10365 mode = V2DImode;
10366 dest = adjust_address (dest, V2DImode, 0);
10367 source = gen_lowpart (V2DImode, source);
10370 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10371 rs6000_emit_le_vsx_permute (tmp, source, mode);
10372 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10375 /* Emit a sequence representing a little-endian VSX load or store,
10376 moving data from SOURCE to DEST in mode MODE. This is done
10377 separately from rs6000_emit_move to ensure it is called only
10378 during expand. LE VSX loads and stores introduced later are
10379 handled with a split. The expand-time RTL generation allows
10380 us to optimize away redundant pairs of register-permutes. */
10381 void
10382 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10384 gcc_assert (!BYTES_BIG_ENDIAN
10385 && VECTOR_MEM_VSX_P (mode)
10386 && !TARGET_P9_VECTOR
10387 && !gpr_or_gpr_p (dest, source)
10388 && (MEM_P (source) ^ MEM_P (dest)));
10390 if (MEM_P (source))
10392 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10393 rs6000_emit_le_vsx_load (dest, source, mode);
10395 else
10397 if (!REG_P (source))
10398 source = force_reg (mode, source);
10399 rs6000_emit_le_vsx_store (dest, source, mode);
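/* Editorial sketch: on a little-endian load the two permutes
   compose to the identity on element order: lxvd2x loads the
   doublewords swapped, { d1, d0 }, and the following xxpermdi swaps
   them back to { d0, d1 }.  Emitting both at expand time lets later
   passes delete permute pairs that cancel.  */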
10403 /* Return whether a SFmode or SImode move can be done without converting one
10404 mode to another. This arises when we have:
10406 (SUBREG:SF (REG:SI ...))
10407 (SUBREG:SI (REG:SF ...))
10409 and one of the values is in a floating point/vector register, where SFmode
10410 scalars are stored in DFmode format. */
10412 bool
10413 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10415 if (TARGET_ALLOW_SF_SUBREG)
10416 return true;
10418 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10419 return true;
10421 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10422 return true;
10424 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10425 if (SUBREG_P (dest))
10427 rtx dest_subreg = SUBREG_REG (dest);
10428 rtx src_subreg = SUBREG_REG (src);
10429 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10432 return false;
10436 /* Helper function to change moves with:
10438 (SUBREG:SF (REG:SI)) and
10439 (SUBREG:SI (REG:SF))
10441 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10442 values are stored as DFmode values in the VSX registers. We need to convert
10443 the bits before we can use a direct move or operate on the bits in the
10444 vector register as an integer type.
10446 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10448 static bool
10449 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10451 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10452 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10453 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10455 rtx inner_source = SUBREG_REG (source);
10456 machine_mode inner_mode = GET_MODE (inner_source);
10458 if (mode == SImode && inner_mode == SFmode)
10460 emit_insn (gen_movsi_from_sf (dest, inner_source));
10461 return true;
10464 if (mode == SFmode && inner_mode == SImode)
10466 emit_insn (gen_movsf_from_si (dest, inner_source));
10467 return true;
10471 return false;
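/* Editorial sketch (an assumed source pattern): type-punning code
   such as

     union { float f; int i; } u;
     u.f = x;
     return u.i;

   can surface as (subreg:SI (reg:SF)) moves; since VSX registers
   keep SFmode scalars in DFmode form, the movsi_from_sf and
   movsf_from_si patterns convert the representation instead of
   copying raw bits.  */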
10474 /* Emit a move from SOURCE to DEST in mode MODE. */
10475 void
10476 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10478 rtx operands[2];
10479 operands[0] = dest;
10480 operands[1] = source;
10482 if (TARGET_DEBUG_ADDR)
10484 fprintf (stderr,
10485 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10486 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10487 GET_MODE_NAME (mode),
10488 lra_in_progress,
10489 reload_completed,
10490 can_create_pseudo_p ());
10491 debug_rtx (dest);
10492 fprintf (stderr, "source:\n");
10493 debug_rtx (source);
10496 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10497 if (CONST_WIDE_INT_P (operands[1])
10498 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10500 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10501 gcc_unreachable ();
10504 #ifdef HAVE_AS_GNU_ATTRIBUTE
10505 /* If we use a long double type, set the flags in .gnu_attribute that say
10506 what the long double type is. This is to allow the linker's warning
10507 message for the wrong long double to be useful, even if the function does
10508 not do a call (for example, doing a 128-bit add on power9 if the long
10509 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128 are
10510 used and they aren't the default long double type. */
10511 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
10513 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
10514 rs6000_passes_float = rs6000_passes_long_double = true;
10516 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
10517 rs6000_passes_float = rs6000_passes_long_double = true;
10519 #endif
10521 /* See if we need to special case SImode/SFmode SUBREG moves. */
10522 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10523 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10524 return;
10526 /* Check if GCC is setting up a block move that will end up using FP
10527 registers as temporaries. We must make sure this is acceptable. */
10528 if (GET_CODE (operands[0]) == MEM
10529 && GET_CODE (operands[1]) == MEM
10530 && mode == DImode
10531 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10532 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10533 && ! (rs6000_slow_unaligned_access (SImode,
10534 (MEM_ALIGN (operands[0]) > 32
10535 ? 32 : MEM_ALIGN (operands[0])))
10536 || rs6000_slow_unaligned_access (SImode,
10537 (MEM_ALIGN (operands[1]) > 32
10538 ? 32 : MEM_ALIGN (operands[1]))))
10539 && ! MEM_VOLATILE_P (operands [0])
10540 && ! MEM_VOLATILE_P (operands [1]))
10542 emit_move_insn (adjust_address (operands[0], SImode, 0),
10543 adjust_address (operands[1], SImode, 0));
10544 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10545 adjust_address (copy_rtx (operands[1]), SImode, 4));
10546 return;
10549 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10550 && !gpc_reg_operand (operands[1], mode))
10551 operands[1] = force_reg (mode, operands[1]);
10553 /* Recognize the case where operand[1] is a reference to thread-local
10554 data and load its address to a register. */
10555 if (tls_referenced_p (operands[1]))
10557 enum tls_model model;
10558 rtx tmp = operands[1];
10559 rtx addend = NULL;
10561 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10563 addend = XEXP (XEXP (tmp, 0), 1);
10564 tmp = XEXP (XEXP (tmp, 0), 0);
10567 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10568 model = SYMBOL_REF_TLS_MODEL (tmp);
10569 gcc_assert (model != 0);
10571 tmp = rs6000_legitimize_tls_address (tmp, model);
10572 if (addend)
10574 tmp = gen_rtx_PLUS (mode, tmp, addend);
10575 tmp = force_operand (tmp, operands[0]);
10577 operands[1] = tmp;
10580 /* 128-bit constant floating-point values on Darwin should really be loaded
10581 as two parts. However, this premature splitting is a problem when DFmode
10582 values can go into Altivec registers. */
10583 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10584 && GET_CODE (operands[1]) == CONST_DOUBLE)
10586 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10587 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10588 DFmode);
10589 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10590 GET_MODE_SIZE (DFmode)),
10591 simplify_gen_subreg (DFmode, operands[1], mode,
10592 GET_MODE_SIZE (DFmode)),
10593 DFmode);
10594 return;
10597 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10598 p1:SD) if p1 is not of floating point class and p0 is spilled as
10599 we can have no analogous movsd_store for this. */
10600 if (lra_in_progress && mode == DDmode
10601 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10602 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10603 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10604 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10606 enum reg_class cl;
10607 int regno = REGNO (SUBREG_REG (operands[1]));
10609 if (regno >= FIRST_PSEUDO_REGISTER)
10611 cl = reg_preferred_class (regno);
10612 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10614 if (regno >= 0 && ! FP_REGNO_P (regno))
10616 mode = SDmode;
10617 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10618 operands[1] = SUBREG_REG (operands[1]);
10621 if (lra_in_progress
10622 && mode == SDmode
10623 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10624 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10625 && (REG_P (operands[1])
10626 || (GET_CODE (operands[1]) == SUBREG
10627 && REG_P (SUBREG_REG (operands[1])))))
10629 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10630 ? SUBREG_REG (operands[1]) : operands[1]);
10631 enum reg_class cl;
10633 if (regno >= FIRST_PSEUDO_REGISTER)
10635 cl = reg_preferred_class (regno);
10636 gcc_assert (cl != NO_REGS);
10637 regno = ira_class_hard_regs[cl][0];
10639 if (FP_REGNO_P (regno))
10641 if (GET_MODE (operands[0]) != DDmode)
10642 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10643 emit_insn (gen_movsd_store (operands[0], operands[1]));
10645 else if (INT_REGNO_P (regno))
10646 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10647 else
10648 gcc_unreachable();
10649 return;
10651 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10652 p:DD)) if p0 is not of floating point class and p1 is spilled as
10653 we can have no analogous movsd_load for this. */
10654 if (lra_in_progress && mode == DDmode
10655 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10656 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10657 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10658 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10660 enum reg_class cl;
10661 int regno = REGNO (SUBREG_REG (operands[0]));
10663 if (regno >= FIRST_PSEUDO_REGISTER)
10665 cl = reg_preferred_class (regno);
10666 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10668 if (regno >= 0 && ! FP_REGNO_P (regno))
10670 mode = SDmode;
10671 operands[0] = SUBREG_REG (operands[0]);
10672 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10675 if (lra_in_progress
10676 && mode == SDmode
10677 && (REG_P (operands[0])
10678 || (GET_CODE (operands[0]) == SUBREG
10679 && REG_P (SUBREG_REG (operands[0]))))
10680 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10681 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10683 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10684 ? SUBREG_REG (operands[0]) : operands[0]);
10685 enum reg_class cl;
10687 if (regno >= FIRST_PSEUDO_REGISTER)
10689 cl = reg_preferred_class (regno);
10690 gcc_assert (cl != NO_REGS);
10691 regno = ira_class_hard_regs[cl][0];
10693 if (FP_REGNO_P (regno))
10695 if (GET_MODE (operands[1]) != DDmode)
10696 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10697 emit_insn (gen_movsd_load (operands[0], operands[1]));
10699 else if (INT_REGNO_P (regno))
10700 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10701 else
10702 gcc_unreachable();
10703 return;
10706 /* FIXME: In the long term, this switch statement should go away
10707 and be replaced by a sequence of tests based on things like
10708 mode == Pmode. */
10709 switch (mode)
10711 case E_HImode:
10712 case E_QImode:
10713 if (CONSTANT_P (operands[1])
10714 && GET_CODE (operands[1]) != CONST_INT)
10715 operands[1] = force_const_mem (mode, operands[1]);
10716 break;
10718 case E_TFmode:
10719 case E_TDmode:
10720 case E_IFmode:
10721 case E_KFmode:
10722 if (FLOAT128_2REG_P (mode))
10723 rs6000_eliminate_indexed_memrefs (operands);
10724 /* fall through */
10726 case E_DFmode:
10727 case E_DDmode:
10728 case E_SFmode:
10729 case E_SDmode:
10730 if (CONSTANT_P (operands[1])
10731 && ! easy_fp_constant (operands[1], mode))
10732 operands[1] = force_const_mem (mode, operands[1]);
10733 break;
10735 case E_V16QImode:
10736 case E_V8HImode:
10737 case E_V4SFmode:
10738 case E_V4SImode:
10739 case E_V2SFmode:
10740 case E_V2SImode:
10741 case E_V2DFmode:
10742 case E_V2DImode:
10743 case E_V1TImode:
10744 if (CONSTANT_P (operands[1])
10745 && !easy_vector_constant (operands[1], mode))
10746 operands[1] = force_const_mem (mode, operands[1]);
10747 break;
10749 case E_SImode:
10750 case E_DImode:
10751 /* Use the default pattern for the address of ELF small data. */
10752 if (TARGET_ELF
10753 && mode == Pmode
10754 && DEFAULT_ABI == ABI_V4
10755 && (GET_CODE (operands[1]) == SYMBOL_REF
10756 || GET_CODE (operands[1]) == CONST)
10757 && small_data_operand (operands[1], mode))
10759 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10760 return;
10763 if (DEFAULT_ABI == ABI_V4
10764 && mode == Pmode && mode == SImode
10765 && flag_pic == 1 && got_operand (operands[1], mode))
10767 emit_insn (gen_movsi_got (operands[0], operands[1]));
10768 return;
10771 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10772 && TARGET_NO_TOC
10773 && ! flag_pic
10774 && mode == Pmode
10775 && CONSTANT_P (operands[1])
10776 && GET_CODE (operands[1]) != HIGH
10777 && GET_CODE (operands[1]) != CONST_INT)
10779 rtx target = (!can_create_pseudo_p ()
10780 ? operands[0]
10781 : gen_reg_rtx (mode));
10783 /* If this is a function address on -mcall-aixdesc,
10784 convert it to the address of the descriptor. */
10785 if (DEFAULT_ABI == ABI_AIX
10786 && GET_CODE (operands[1]) == SYMBOL_REF
10787 && XSTR (operands[1], 0)[0] == '.')
10789 const char *name = XSTR (operands[1], 0);
10790 rtx new_ref;
10791 while (*name == '.')
10792 name++;
10793 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10794 CONSTANT_POOL_ADDRESS_P (new_ref)
10795 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10796 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10797 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10798 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10799 operands[1] = new_ref;
10802 if (DEFAULT_ABI == ABI_DARWIN)
10804 #if TARGET_MACHO
10805 if (MACHO_DYNAMIC_NO_PIC_P)
10807 /* Take care of any required data indirection. */
10808 operands[1] = rs6000_machopic_legitimize_pic_address (
10809 operands[1], mode, operands[0]);
10810 if (operands[0] != operands[1])
10811 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10812 return;
10814 #endif
10815 emit_insn (gen_macho_high (target, operands[1]));
10816 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10817 return;
10820 emit_insn (gen_elf_high (target, operands[1]));
10821 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10822 return;
10825 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10826 and we have put it in the TOC, we just need to make a TOC-relative
10827 reference to it. */
10828 if (TARGET_TOC
10829 && GET_CODE (operands[1]) == SYMBOL_REF
10830 && use_toc_relative_ref (operands[1], mode))
10831 operands[1] = create_TOC_reference (operands[1], operands[0]);
10832 else if (mode == Pmode
10833 && CONSTANT_P (operands[1])
10834 && GET_CODE (operands[1]) != HIGH
10835 && ((GET_CODE (operands[1]) != CONST_INT
10836 && ! easy_fp_constant (operands[1], mode))
10837 || (GET_CODE (operands[1]) == CONST_INT
10838 && (num_insns_constant (operands[1], mode)
10839 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10840 || (GET_CODE (operands[0]) == REG
10841 && FP_REGNO_P (REGNO (operands[0]))))
10842 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10843 && (TARGET_CMODEL == CMODEL_SMALL
10844 || can_create_pseudo_p ()
10845 || (REG_P (operands[0])
10846 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10849 #if TARGET_MACHO
10850 /* Darwin uses a special PIC legitimizer. */
10851 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10853 operands[1] =
10854 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10855 operands[0]);
10856 if (operands[0] != operands[1])
10857 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10858 return;
10860 #endif
10862 /* If we are to limit the number of things we put in the TOC and
10863 this is a symbol plus a constant we can add in one insn,
10864 just put the symbol in the TOC and add the constant. */
10865 if (GET_CODE (operands[1]) == CONST
10866 && TARGET_NO_SUM_IN_TOC
10867 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10868 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10869 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10870 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10871 && ! side_effects_p (operands[0]))
10873 rtx sym =
10874 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10875 rtx other = XEXP (XEXP (operands[1], 0), 1);
10877 sym = force_reg (mode, sym);
10878 emit_insn (gen_add3_insn (operands[0], sym, other));
10879 return;
10882 operands[1] = force_const_mem (mode, operands[1]);
10884 if (TARGET_TOC
10885 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10886 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10888 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10889 operands[0]);
10890 operands[1] = gen_const_mem (mode, tocref);
10891 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10894 break;
10896 case E_TImode:
10897 if (!VECTOR_MEM_VSX_P (TImode))
10898 rs6000_eliminate_indexed_memrefs (operands);
10899 break;
10901 case E_PTImode:
10902 rs6000_eliminate_indexed_memrefs (operands);
10903 break;
10905 default:
10906 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10909 /* Above, we may have called force_const_mem which may have returned
10910 an invalid address. If we can, fix this up; otherwise, reload will
10911 have to deal with it. */
10912 if (GET_CODE (operands[1]) == MEM)
10913 operands[1] = validize_mem (operands[1]);
10915 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10918 /* Nonzero if we can use a floating-point register to pass this arg. */
10919 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10920 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10921 && (CUM)->fregno <= FP_ARG_MAX_REG \
10922 && TARGET_HARD_FLOAT)
10924 /* Nonzero if we can use an AltiVec register to pass this arg. */
10925 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10926 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10927 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10928 && TARGET_ALTIVEC_ABI \
10929 && (NAMED))
10931 /* Walk down the type tree of TYPE counting consecutive base elements.
10932 If *MODEP is VOIDmode, then set it to the first valid floating point
10933 or vector type. If a non-floating point or vector type is found, or
10934 if a floating point or vector type that doesn't match a non-VOIDmode
10935 *MODEP is found, then return -1, otherwise return the count in the
10936 sub-tree. */
10938 static int
10939 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10941 machine_mode mode;
10942 HOST_WIDE_INT size;
10944 switch (TREE_CODE (type))
10946 case REAL_TYPE:
10947 mode = TYPE_MODE (type);
10948 if (!SCALAR_FLOAT_MODE_P (mode))
10949 return -1;
10951 if (*modep == VOIDmode)
10952 *modep = mode;
10954 if (*modep == mode)
10955 return 1;
10957 break;
10959 case COMPLEX_TYPE:
10960 mode = TYPE_MODE (TREE_TYPE (type));
10961 if (!SCALAR_FLOAT_MODE_P (mode))
10962 return -1;
10964 if (*modep == VOIDmode)
10965 *modep = mode;
10967 if (*modep == mode)
10968 return 2;
10970 break;
10972 case VECTOR_TYPE:
10973 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10974 return -1;
10976 /* Use V4SImode as representative of all 128-bit vector types. */
10977 size = int_size_in_bytes (type);
10978 switch (size)
10980 case 16:
10981 mode = V4SImode;
10982 break;
10983 default:
10984 return -1;
10987 if (*modep == VOIDmode)
10988 *modep = mode;
10990 /* Vector modes are considered to be opaque: two vectors are
10991 equivalent for the purposes of being homogeneous aggregates
10992 if they are the same size. */
10993 if (*modep == mode)
10994 return 1;
10996 break;
10998 case ARRAY_TYPE:
11000 int count;
11001 tree index = TYPE_DOMAIN (type);
11003 /* Can't handle incomplete types or sizes that are not
11004 fixed. */
11005 if (!COMPLETE_TYPE_P (type)
11006 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11007 return -1;
11009 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11010 if (count == -1
11011 || !index
11012 || !TYPE_MAX_VALUE (index)
11013 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11014 || !TYPE_MIN_VALUE (index)
11015 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11016 || count < 0)
11017 return -1;
11019 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11020 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11022 /* There must be no padding. */
11023 if (wi::to_wide (TYPE_SIZE (type))
11024 != count * GET_MODE_BITSIZE (*modep))
11025 return -1;
11027 return count;
11030 case RECORD_TYPE:
11032 int count = 0;
11033 int sub_count;
11034 tree field;
11036 /* Can't handle incomplete types or sizes that are not
11037 fixed. */
11038 if (!COMPLETE_TYPE_P (type)
11039 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11040 return -1;
11042 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11044 if (TREE_CODE (field) != FIELD_DECL)
11045 continue;
11047 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11048 if (sub_count < 0)
11049 return -1;
11050 count += sub_count;
11053 /* There must be no padding. */
11054 if (wi::to_wide (TYPE_SIZE (type))
11055 != count * GET_MODE_BITSIZE (*modep))
11056 return -1;
11058 return count;
11061 case UNION_TYPE:
11062 case QUAL_UNION_TYPE:
11064 /* These aren't very interesting except in a degenerate case. */
11065 int count = 0;
11066 int sub_count;
11067 tree field;
11069 /* Can't handle incomplete types or sizes that are not
11070 fixed. */
11071 if (!COMPLETE_TYPE_P (type)
11072 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11073 return -1;
11075 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11077 if (TREE_CODE (field) != FIELD_DECL)
11078 continue;
11080 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11081 if (sub_count < 0)
11082 return -1;
11083 count = count > sub_count ? count : sub_count;
11086 /* There must be no padding. */
11087 if (wi::to_wide (TYPE_SIZE (type))
11088 != count * GET_MODE_BITSIZE (*modep))
11089 return -1;
11091 return count;
11094 default:
11095 break;
11098 return -1;
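/* Editorial illustration (not part of the GCC sources): assuming a
   64-bit ELFv2 target with AltiVec enabled, the walker above yields

     struct a { float x, y; };                       2 (SFmode leaves)
     struct b { double d[2]; _Complex double c; }    4 (DFmode leaves)
     struct c { vector int u, v; };                  2 (V4SImode leaves)
     struct d { float x; double y; };               -1 (mixed modes)
     struct e { float x; } __attribute__ ((aligned (16)));
                                                    -1 (trailing padding)

   The caller seeds *MODEP with VOIDmode; the first floating point or
   vector leaf fixes the mode that every remaining leaf must match.  */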
11101 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11102 float or vector aggregate that shall be passed in FP/vector registers
11103 according to the ELFv2 ABI, return the homogeneous element mode in
11104 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11106 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11108 static bool
11109 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11110 machine_mode *elt_mode,
11111 int *n_elts)
11113 /* Note that we do not accept complex types at the top level as
11114 homogeneous aggregates; these types are handled via the
11115 targetm.calls.split_complex_arg mechanism. Complex types
11116 can be elements of homogeneous aggregates, however. */
11117 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
11118 && AGGREGATE_TYPE_P (type))
11120 machine_mode field_mode = VOIDmode;
11121 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11123 if (field_count > 0)
11125 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11126 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11128 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11129 up to AGGR_ARG_NUM_REG registers. */
11130 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11132 if (elt_mode)
11133 *elt_mode = field_mode;
11134 if (n_elts)
11135 *n_elts = field_count;
11136 return true;
11141 if (elt_mode)
11142 *elt_mode = mode;
11143 if (n_elts)
11144 *n_elts = 1;
11145 return false;
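/* Editorial sketch of the net effect under ELFv2, using a hypothetical
   type:

     struct hfa { double a, b, c, d; };

   yields field_count 4 with field_mode DFmode; each element needs one
   FPR and 4 <= AGGR_ARG_NUM_REG, so the aggregate is homogeneous and
   is later assigned to consecutive FPRs by rs6000_function_arg.  A
   struct of several 16-byte vectors qualifies the same way, one VR
   per element, until the element count exceeds AGGR_ARG_NUM_REG.  */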
11148 /* Return a nonzero value to say to return the function value in
11149 memory, just as large structures are always returned. TYPE will be
11150 the data type of the value, and FNTYPE will be the type of the
11151 function doing the returning, or @code{NULL} for libcalls.
11153 The AIX ABI for the RS/6000 specifies that all structures are
11154 returned in memory. The Darwin ABI does the same.
11156 For the Darwin 64 Bit ABI, a function result can be returned in
11157 registers or in memory, depending on the size of the return data
11158 type. If it is returned in registers, the value occupies the same
11159 registers as it would if it were the first and only function
11160 argument. Otherwise, the function places its result in memory at
11161 the location pointed to by GPR3.
11163 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11164 but a draft put them in memory, and GCC used to implement the draft
11165 instead of the final standard. Therefore, aix_struct_return
11166 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11167 compatibility can change DRAFT_V4_STRUCT_RET to override the
11168 default, and -m switches get the final word. See
11169 rs6000_option_override_internal for more details.
11171 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11172 long double support is enabled. These values are returned in memory.
11174 int_size_in_bytes returns -1 for variable size objects, which go in
11175 memory always. The cast to unsigned makes -1 > 8. */
11177 static bool
11178 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11180 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11181 if (TARGET_MACHO
11182 && rs6000_darwin64_abi
11183 && TREE_CODE (type) == RECORD_TYPE
11184 && int_size_in_bytes (type) > 0)
11186 CUMULATIVE_ARGS valcum;
11187 rtx valret;
11189 valcum.words = 0;
11190 valcum.fregno = FP_ARG_MIN_REG;
11191 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11192 /* Do a trial code generation as if this were going to be passed
11193 as an argument; if any part goes in memory, we return NULL. */
11194 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11195 if (valret)
11196 return false;
11197 /* Otherwise fall through to more conventional ABI rules. */
11200 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11201 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11202 NULL, NULL))
11203 return false;
11205 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
11206 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11207 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11208 return false;
11210 if (AGGREGATE_TYPE_P (type)
11211 && (aix_struct_return
11212 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11213 return true;
11215 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11216 modes only exist for GCC vector types if -maltivec. */
11217 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11218 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11219 return false;
11221 /* Return synthetic vectors in memory. */
11222 if (TREE_CODE (type) == VECTOR_TYPE
11223 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11225 static bool warned_for_return_big_vectors = false;
11226 if (!warned_for_return_big_vectors)
11228 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11229 "non-standard ABI extension with no compatibility "
11230 "guarantee");
11231 warned_for_return_big_vectors = true;
11233 return true;
11236 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11237 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11238 return true;
11240 return false;
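/* Editorial examples of the resulting decisions on a 64-bit ELFv2
   target with hard float (hypothetical types):

     struct s1 { double a, b; };   homogeneous aggregate: registers
     struct s2 { char c[16]; };    16 bytes or less: registers
     struct s3 { char c[24]; };    larger aggregate: memory

   Under the AIX ABI, aix_struct_return sends all three to memory.  */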
11243 /* Specify whether values returned in registers should be at the most
11244 significant end of a register. We want aggregates returned by
11245 value to match the way aggregates are passed to functions. */
11247 static bool
11248 rs6000_return_in_msb (const_tree valtype)
11250 return (DEFAULT_ABI == ABI_ELFv2
11251 && BYTES_BIG_ENDIAN
11252 && AGGREGATE_TYPE_P (valtype)
11253 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11254 == PAD_UPWARD));
11257 #ifdef HAVE_AS_GNU_ATTRIBUTE
11258 /* Return TRUE if a call to function FNDECL may be one that
11259 potentially affects the function calling ABI of the object file. */
11261 static bool
11262 call_ABI_of_interest (tree fndecl)
11264 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11266 struct cgraph_node *c_node;
11268 /* Libcalls are always interesting. */
11269 if (fndecl == NULL_TREE)
11270 return true;
11272 /* Any call to an external function is interesting. */
11273 if (DECL_EXTERNAL (fndecl))
11274 return true;
11276 /* Interesting functions that we are emitting in this object file. */
11277 c_node = cgraph_node::get (fndecl);
11278 c_node = c_node->ultimate_alias_target ();
11279 return !c_node->only_called_directly_p ();
11281 return false;
11283 #endif
11285 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11286 for a call to a function whose data type is FNTYPE.
11287 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11289 For incoming args we set the number of arguments in the prototype large
11290 so we never return a PARALLEL. */
11292 void
11293 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11294 rtx libname ATTRIBUTE_UNUSED, int incoming,
11295 int libcall, int n_named_args,
11296 tree fndecl ATTRIBUTE_UNUSED,
11297 machine_mode return_mode ATTRIBUTE_UNUSED)
11299 static CUMULATIVE_ARGS zero_cumulative;
11301 *cum = zero_cumulative;
11302 cum->words = 0;
11303 cum->fregno = FP_ARG_MIN_REG;
11304 cum->vregno = ALTIVEC_ARG_MIN_REG;
11305 cum->prototype = (fntype && prototype_p (fntype));
11306 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11307 ? CALL_LIBCALL : CALL_NORMAL);
11308 cum->sysv_gregno = GP_ARG_MIN_REG;
11309 cum->stdarg = stdarg_p (fntype);
11310 cum->libcall = libcall;
11312 cum->nargs_prototype = 0;
11313 if (incoming || cum->prototype)
11314 cum->nargs_prototype = n_named_args;
11316 /* Check for a longcall attribute. */
11317 if ((!fntype && rs6000_default_long_calls)
11318 || (fntype
11319 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11320 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11321 cum->call_cookie |= CALL_LONG;
11323 if (TARGET_DEBUG_ARG)
11325 fprintf (stderr, "\ninit_cumulative_args:");
11326 if (fntype)
11328 tree ret_type = TREE_TYPE (fntype);
11329 fprintf (stderr, " ret code = %s,",
11330 get_tree_code_name (TREE_CODE (ret_type)));
11333 if (cum->call_cookie & CALL_LONG)
11334 fprintf (stderr, " longcall,");
11336 fprintf (stderr, " proto = %d, nargs = %d\n",
11337 cum->prototype, cum->nargs_prototype);
11340 #ifdef HAVE_AS_GNU_ATTRIBUTE
11341 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11343 cum->escapes = call_ABI_of_interest (fndecl);
11344 if (cum->escapes)
11346 tree return_type;
11348 if (fntype)
11350 return_type = TREE_TYPE (fntype);
11351 return_mode = TYPE_MODE (return_type);
11353 else
11354 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11356 if (return_type != NULL)
11358 if (TREE_CODE (return_type) == RECORD_TYPE
11359 && TYPE_TRANSPARENT_AGGR (return_type))
11361 return_type = TREE_TYPE (first_field (return_type));
11362 return_mode = TYPE_MODE (return_type);
11364 if (AGGREGATE_TYPE_P (return_type)
11365 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11366 <= 8))
11367 rs6000_returns_struct = true;
11369 if (SCALAR_FLOAT_MODE_P (return_mode))
11371 rs6000_passes_float = true;
11372 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11373 && (FLOAT128_IBM_P (return_mode)
11374 || FLOAT128_IEEE_P (return_mode)
11375 || (return_type != NULL
11376 && (TYPE_MAIN_VARIANT (return_type)
11377 == long_double_type_node))))
11378 rs6000_passes_long_double = true;
11380 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11381 || PAIRED_VECTOR_MODE (return_mode))
11382 rs6000_passes_vector = true;
11385 #endif
11387 if (fntype
11388 && !TARGET_ALTIVEC
11389 && TARGET_ALTIVEC_ABI
11390 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11392 error ("cannot return value in vector register because"
11393 " altivec instructions are disabled, use %qs"
11394 " to enable them", "-maltivec");
11398 /* The mode the ABI uses for a word. This is not the same as word_mode
11399 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11401 static scalar_int_mode
11402 rs6000_abi_word_mode (void)
11404 return TARGET_32BIT ? SImode : DImode;
11407 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11408 static char *
11409 rs6000_offload_options (void)
11411 if (TARGET_64BIT)
11412 return xstrdup ("-foffload-abi=lp64");
11413 else
11414 return xstrdup ("-foffload-abi=ilp32");
11417 /* On rs6000, function arguments are promoted, as are function return
11418 values. */
11420 static machine_mode
11421 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11422 machine_mode mode,
11423 int *punsignedp ATTRIBUTE_UNUSED,
11424 const_tree, int)
11426 PROMOTE_MODE (mode, *punsignedp, type);
11428 return mode;
11431 /* Return true if TYPE must be passed on the stack and not in registers. */
11433 static bool
11434 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11436 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11437 return must_pass_in_stack_var_size (mode, type);
11438 else
11439 return must_pass_in_stack_var_size_or_pad (mode, type);
11442 static inline bool
11443 is_complex_IBM_long_double (machine_mode mode)
11445 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
11448 /* Whether ABI_V4 passes MODE args to a function in floating point
11449 registers. */
11451 static bool
11452 abi_v4_pass_in_fpr (machine_mode mode, bool named)
11454 if (!TARGET_HARD_FLOAT)
11455 return false;
11456 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11457 return true;
11458 if (TARGET_SINGLE_FLOAT && mode == SFmode && named)
11459 return true;
11460 /* ABI_V4 passes complex IBM long double in 8 gprs.
11461 Stupid, but we can't change the ABI now. */
11462 if (is_complex_IBM_long_double (mode))
11463 return false;
11464 if (FLOAT128_2REG_P (mode))
11465 return true;
11466 if (DECIMAL_FLOAT_MODE_P (mode))
11467 return true;
11468 return false;
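/* Editorial summary of the tests above, assuming hard float and the
   default IBM 128-bit long double:

     double (DFmode)                    FPR, if TARGET_DOUBLE_FLOAT
     float (SFmode), named args only    FPR, if TARGET_SINGLE_FLOAT
     IBM long double (2-register)       FPR pair
     _Complex IBM long double           GPRs, the historical exception
     decimal float (SD/DD/TDmode)       FPR(s)
     everything else                    GPRs or the stack  */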
11471 /* Implement TARGET_FUNCTION_ARG_PADDING.
11473 For the AIX ABI structs are always stored left shifted in their
11474 argument slot. */
11476 static pad_direction
11477 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11479 #ifndef AGGREGATE_PADDING_FIXED
11480 #define AGGREGATE_PADDING_FIXED 0
11481 #endif
11482 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11483 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11484 #endif
11486 if (!AGGREGATE_PADDING_FIXED)
11488 /* GCC used to pass structures of the same size as integer types as
11489 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11490 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11491 passed padded downward, except that -mstrict-align further
11492 muddied the water in that multi-component structures of 2 and 4
11493 bytes in size were passed padded upward.
11495 The following arranges for best compatibility with previous
11496 versions of gcc, but removes the -mstrict-align dependency. */
11497 if (BYTES_BIG_ENDIAN)
11499 HOST_WIDE_INT size = 0;
11501 if (mode == BLKmode)
11503 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11504 size = int_size_in_bytes (type);
11506 else
11507 size = GET_MODE_SIZE (mode);
11509 if (size == 1 || size == 2 || size == 4)
11510 return PAD_DOWNWARD;
11512 return PAD_UPWARD;
11515 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11517 if (type != 0 && AGGREGATE_TYPE_P (type))
11518 return PAD_UPWARD;
11521 /* Fall back to the default. */
11522 return default_function_arg_padding (mode, type);
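/* Editorial examples for a big-endian target without fixed aggregate
   padding: a struct of size 1, 2 or 4 keeps the old integer-like
   behavior and is padded downward; anything else is padded upward:

     struct two { char a, b; };        size 2: PAD_DOWNWARD
     struct three { char a, b, c; };   size 3: PAD_UPWARD  */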
11525 /* If defined, a C expression that gives the alignment boundary, in bits,
11526 of an argument with the specified mode and type. If it is not defined,
11527 PARM_BOUNDARY is used for all arguments.
11529 V.4 wants long longs and doubles to be double word aligned. Just
11530 testing the mode size is a boneheaded way to do this as it means
11531 that other types such as complex int are also double word aligned.
11532 However, we're stuck with this because changing the ABI might break
11533 existing library interfaces.
11535 Quadword align Altivec/VSX vectors.
11536 Quadword align large synthetic vector types. */
11538 static unsigned int
11539 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11541 machine_mode elt_mode;
11542 int n_elts;
11544 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11546 if (DEFAULT_ABI == ABI_V4
11547 && (GET_MODE_SIZE (mode) == 8
11548 || (TARGET_HARD_FLOAT
11549 && !is_complex_IBM_long_double (mode)
11550 && FLOAT128_2REG_P (mode))))
11551 return 64;
11552 else if (FLOAT128_VECTOR_P (mode))
11553 return 128;
11554 else if (PAIRED_VECTOR_MODE (mode)
11555 || (type && TREE_CODE (type) == VECTOR_TYPE
11556 && int_size_in_bytes (type) >= 8
11557 && int_size_in_bytes (type) < 16))
11558 return 64;
11559 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11560 || (type && TREE_CODE (type) == VECTOR_TYPE
11561 && int_size_in_bytes (type) >= 16))
11562 return 128;
11564 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11565 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11566 -mcompat-align-parm is used. */
11567 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11568 || DEFAULT_ABI == ABI_ELFv2)
11569 && type && TYPE_ALIGN (type) > 64)
11571 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11572 or homogeneous float/vector aggregates here. We already handled
11573 vector aggregates above, but still need to check for float here. */
11574 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11575 && !SCALAR_FLOAT_MODE_P (elt_mode));
11577 /* We used to check for BLKmode instead of the above aggregate type
11578 check. Warn when this results in any difference to the ABI. */
11579 if (aggregate_p != (mode == BLKmode))
11581 static bool warned;
11582 if (!warned && warn_psabi)
11584 warned = true;
11585 inform (input_location,
11586 "the ABI of passing aggregates with %d-byte alignment"
11587 " has changed in GCC 5",
11588 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11592 if (aggregate_p)
11593 return 128;
11596 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11597 implement the "aggregate type" check as a BLKmode check here; this
11598 means certain aggregate types are in fact not aligned. */
11599 if (TARGET_MACHO && rs6000_darwin64_abi
11600 && mode == BLKmode
11601 && type && TYPE_ALIGN (type) > 64)
11602 return 128;
11604 return PARM_BOUNDARY;
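/* Editorial examples of the boundaries computed above (hypothetical
   declarations):

     long long, 32-bit ABI_V4                        64
     _Complex int, 32-bit ABI_V4                     64 (the quirk above)
     vector int                                      128
     __float128, where enabled                       128
     struct with 32-byte alignment, ELFv2 (or AIX
       without -mcompat-align-parm)                  128
     plain int                                       PARM_BOUNDARY  */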
11607 /* The offset in words to the start of the parameter save area. */
11609 static unsigned int
11610 rs6000_parm_offset (void)
11612 return (DEFAULT_ABI == ABI_V4 ? 2
11613 : DEFAULT_ABI == ABI_ELFv2 ? 4
11614 : 6);
11617 /* For a function parm of MODE and TYPE, return the starting word in
11618 the parameter area. NWORDS of the parameter area are already used. */
11620 static unsigned int
11621 rs6000_parm_start (machine_mode mode, const_tree type,
11622 unsigned int nwords)
11624 unsigned int align;
11626 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11627 return nwords + (-(rs6000_parm_offset () + nwords) & align);
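/* Editorial worked example: on 64-bit ELFv2, rs6000_parm_offset () is
   4; with one doubleword already used (NWORDS == 1) and a
   quadword-aligned argument (boundary 128, PARM_BOUNDARY 64):

     align = 128 / 64 - 1 = 1
     start = 1 + (-(4 + 1) & 1) = 2

   Word 2 plus the 4-word offset is 48 bytes from the start of the
   save area, which is 16-byte aligned as required.  */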
11630 /* Compute the size (in words) of a function argument. */
11632 static unsigned long
11633 rs6000_arg_size (machine_mode mode, const_tree type)
11635 unsigned long size;
11637 if (mode != BLKmode)
11638 size = GET_MODE_SIZE (mode);
11639 else
11640 size = int_size_in_bytes (type);
11642 if (TARGET_32BIT)
11643 return (size + 3) >> 2;
11644 else
11645 return (size + 7) >> 3;
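/* Editorial examples: a 9-byte BLKmode struct occupies
   (9 + 7) >> 3 = 2 doublewords on 64-bit targets and
   (9 + 3) >> 2 = 3 words on 32-bit targets; DFmode is 1 doubleword
   on 64-bit but 2 words on 32-bit.  */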
11648 /* Use this to flush pending int fields. */
11650 static void
11651 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11652 HOST_WIDE_INT bitpos, int final)
11654 unsigned int startbit, endbit;
11655 int intregs, intoffset;
11657 /* Handle the situations where a float is taking up the first half
11658 of the GPR, and the other half is empty (typically due to
11659 alignment restrictions). We can detect this by an 8-byte-aligned
11660 int field, or by seeing that this is the final flush for this
11661 argument. Count the word and continue on. */
11662 if (cum->floats_in_gpr == 1
11663 && (cum->intoffset % 64 == 0
11664 || (cum->intoffset == -1 && final)))
11666 cum->words++;
11667 cum->floats_in_gpr = 0;
11670 if (cum->intoffset == -1)
11671 return;
11673 intoffset = cum->intoffset;
11674 cum->intoffset = -1;
11675 cum->floats_in_gpr = 0;
11677 if (intoffset % BITS_PER_WORD != 0)
11679 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11680 if (!int_mode_for_size (bits, 0).exists ())
11682 /* We couldn't find an appropriate mode, which happens,
11683 e.g., in packed structs when there are 3 bytes to load.
11684 Move intoffset back to the beginning of the word in this
11685 case. */
11686 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11690 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11691 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11692 intregs = (endbit - startbit) / BITS_PER_WORD;
11693 cum->words += intregs;
11694 /* words should be unsigned. */
11695 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11697 int pad = (endbit/BITS_PER_WORD) - cum->words;
11698 cum->words += pad;
11702 /* The darwin64 ABI calls for us to recurse down through structs,
11703 looking for elements passed in registers. Unfortunately, we have
11704 to track int register count here also because of misalignments
11705 in powerpc alignment mode. */
11707 static void
11708 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11709 const_tree type,
11710 HOST_WIDE_INT startbitpos)
11712 tree f;
11714 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11715 if (TREE_CODE (f) == FIELD_DECL)
11717 HOST_WIDE_INT bitpos = startbitpos;
11718 tree ftype = TREE_TYPE (f);
11719 machine_mode mode;
11720 if (ftype == error_mark_node)
11721 continue;
11722 mode = TYPE_MODE (ftype);
11724 if (DECL_SIZE (f) != 0
11725 && tree_fits_uhwi_p (bit_position (f)))
11726 bitpos += int_bit_position (f);
11728 /* ??? FIXME: else assume zero offset. */
11730 if (TREE_CODE (ftype) == RECORD_TYPE)
11731 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11732 else if (USE_FP_FOR_ARG_P (cum, mode))
11734 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11735 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11736 cum->fregno += n_fpregs;
11737 /* Single-precision floats present a special problem for
11738 us, because they are smaller than an 8-byte GPR, and so
11739 the structure-packing rules combined with the standard
11740 varargs behavior mean that we want to pack float/float
11741 and float/int combinations into a single register's
11742 space. This is complicated by the arg advance flushing,
11743 which works on arbitrarily large groups of int-type
11744 fields. */
11745 if (mode == SFmode)
11747 if (cum->floats_in_gpr == 1)
11749 /* Two floats in a word; count the word and reset
11750 the float count. */
11751 cum->words++;
11752 cum->floats_in_gpr = 0;
11754 else if (bitpos % 64 == 0)
11756 /* A float at the beginning of an 8-byte word;
11757 count it and put off adjusting cum->words until
11758 we see if an arg advance flush is going to do it
11759 for us. */
11760 cum->floats_in_gpr++;
11762 else
11764 /* The float is at the end of a word, preceded
11765 by integer fields, so the arg advance flush
11766 just above has already set cum->words and
11767 everything is taken care of. */
11770 else
11771 cum->words += n_fpregs;
11773 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11775 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11776 cum->vregno++;
11777 cum->words += 2;
11779 else if (cum->intoffset == -1)
11780 cum->intoffset = bitpos;
11784 /* Check for an item that needs to be considered specially under the darwin 64
11785 bit ABI. These are record types where the mode is BLKmode or the structure is
11786 8 bytes in size. */
11787 static int
11788 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11790 return rs6000_darwin64_abi
11791 && ((mode == BLKmode
11792 && TREE_CODE (type) == RECORD_TYPE
11793 && int_size_in_bytes (type) > 0)
11794 || (type && TREE_CODE (type) == RECORD_TYPE
11795 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11798 /* Update the data in CUM to advance over an argument
11799 of mode MODE and data type TYPE.
11800 (TYPE is null for libcalls where that information may not be available.)
11802 Note that for args passed by reference, function_arg will be called
11803 with MODE and TYPE set to that of the pointer to the arg, not the arg
11804 itself. */
11806 static void
11807 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11808 const_tree type, bool named, int depth)
11810 machine_mode elt_mode;
11811 int n_elts;
11813 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11815 /* Only tick off an argument if we're not recursing. */
11816 if (depth == 0)
11817 cum->nargs_prototype--;
11819 #ifdef HAVE_AS_GNU_ATTRIBUTE
11820 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11821 && cum->escapes)
11823 if (SCALAR_FLOAT_MODE_P (mode))
11825 rs6000_passes_float = true;
11826 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11827 && (FLOAT128_IBM_P (mode)
11828 || FLOAT128_IEEE_P (mode)
11829 || (type != NULL
11830 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11831 rs6000_passes_long_double = true;
11833 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11834 || (PAIRED_VECTOR_MODE (mode)
11835 && !cum->stdarg
11836 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11837 rs6000_passes_vector = true;
11839 #endif
11841 if (TARGET_ALTIVEC_ABI
11842 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11843 || (type && TREE_CODE (type) == VECTOR_TYPE
11844 && int_size_in_bytes (type) == 16)))
11846 bool stack = false;
11848 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11850 cum->vregno += n_elts;
11852 if (!TARGET_ALTIVEC)
11853 error ("cannot pass argument in vector register because"
11854 " altivec instructions are disabled, use %qs"
11855 " to enable them", "-maltivec");
11857 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11858 even if it is going to be passed in a vector register.
11859 Darwin does the same for variable-argument functions. */
11860 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11861 && TARGET_64BIT)
11862 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11863 stack = true;
11865 else
11866 stack = true;
11868 if (stack)
11870 int align;
11872 /* Vector parameters must be 16-byte aligned. In 32-bit
11873 mode this means we need to take into account the offset
11874 to the parameter save area. In 64-bit mode, they just
11875 have to start on an even word, since the parameter save
11876 area is 16-byte aligned. */
11877 if (TARGET_32BIT)
11878 align = -(rs6000_parm_offset () + cum->words) & 3;
11879 else
11880 align = cum->words & 1;
11881 cum->words += align + rs6000_arg_size (mode, type);
11883 if (TARGET_DEBUG_ARG)
11885 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11886 cum->words, align);
11887 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11888 cum->nargs_prototype, cum->prototype,
11889 GET_MODE_NAME (mode));
11893 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11895 int size = int_size_in_bytes (type);
11896 /* Variable sized types have size == -1 and are
11897 treated as if consisting entirely of ints.
11898 Pad to 16 byte boundary if needed. */
11899 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11900 && (cum->words % 2) != 0)
11901 cum->words++;
11902 /* For varargs, we can just go up by the size of the struct. */
11903 if (!named)
11904 cum->words += (size + 7) / 8;
11905 else
11907 /* It is tempting to say int register count just goes up by
11908 sizeof(type)/8, but this is wrong in a case such as
11909 { int; double; int; } [powerpc alignment]. We have to
11910 grovel through the fields for these too. */
11911 cum->intoffset = 0;
11912 cum->floats_in_gpr = 0;
11913 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11914 rs6000_darwin64_record_arg_advance_flush (cum,
11915 size * BITS_PER_UNIT, 1);
11917 if (TARGET_DEBUG_ARG)
11919 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11920 cum->words, TYPE_ALIGN (type), size);
11921 fprintf (stderr,
11922 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11923 cum->nargs_prototype, cum->prototype,
11924 GET_MODE_NAME (mode));
11927 else if (DEFAULT_ABI == ABI_V4)
11929 if (abi_v4_pass_in_fpr (mode, named))
11931 /* _Decimal128 must use an even/odd register pair. This assumes
11932 that the register number is odd when fregno is odd. */
11933 if (mode == TDmode && (cum->fregno % 2) == 1)
11934 cum->fregno++;
11936 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11937 <= FP_ARG_V4_MAX_REG)
11938 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11939 else
11941 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11942 if (mode == DFmode || FLOAT128_IBM_P (mode)
11943 || mode == DDmode || mode == TDmode)
11944 cum->words += cum->words & 1;
11945 cum->words += rs6000_arg_size (mode, type);
11948 else
11950 int n_words = rs6000_arg_size (mode, type);
11951 int gregno = cum->sysv_gregno;
11953 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11954 As does any other 2 word item such as complex int due to a
11955 historical mistake. */
11956 if (n_words == 2)
11957 gregno += (1 - gregno) & 1;
11959 /* Multi-reg args are not split between registers and stack. */
11960 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11962 /* Long long is aligned on the stack. So are other 2 word
11963 items such as complex int due to a historical mistake. */
11964 if (n_words == 2)
11965 cum->words += cum->words & 1;
11966 cum->words += n_words;
11969 /* Note: we continue to accumulate gregno even after we have started
11970 spilling to the stack; this is how expand_builtin_saveregs
11971 learns that spilling to the stack has begun. */
11972 cum->sysv_gregno = gregno + n_words;
11975 if (TARGET_DEBUG_ARG)
11977 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11978 cum->words, cum->fregno);
11979 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11980 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11981 fprintf (stderr, "mode = %4s, named = %d\n",
11982 GET_MODE_NAME (mode), named);
11985 else
11987 int n_words = rs6000_arg_size (mode, type);
11988 int start_words = cum->words;
11989 int align_words = rs6000_parm_start (mode, type, start_words);
11991 cum->words = align_words + n_words;
11993 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11995 /* _Decimal128 must be passed in an even/odd float register pair.
11996 This assumes that the register number is odd when fregno is
11997 odd. */
11998 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11999 cum->fregno++;
12000 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12003 if (TARGET_DEBUG_ARG)
12005 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12006 cum->words, cum->fregno);
12007 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12008 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12009 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12010 named, align_words - start_words, depth);
12015 static void
12016 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12017 const_tree type, bool named)
12019 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12020 0);
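/* Editorial trace of the advance logic, 64-bit ELFv2 with hard float,
   for a prototyped void f (double a, int b, double c):

     initially     words = 0, fregno = 33 (f1)
     after a       words = 1, fregno = 34   a in f1, word 0 consumed
     after b       words = 2, fregno = 34   b in r4 (word 1)
     after c       words = 3, fregno = 35   c in f2

   FP arguments consume both an FPR and their parameter save area
   words.  */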
12023 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12024 structure between cum->intoffset and bitpos to integer registers. */
12026 static void
12027 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12028 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12030 machine_mode mode;
12031 unsigned int regno;
12032 unsigned int startbit, endbit;
12033 int this_regno, intregs, intoffset;
12034 rtx reg;
12036 if (cum->intoffset == -1)
12037 return;
12039 intoffset = cum->intoffset;
12040 cum->intoffset = -1;
12042 /* If this is the trailing part of a word, try to only load that
12043 much into the register. Otherwise load the whole register. Note
12044 that in the latter case we may pick up unwanted bits. It's not a
12045 problem at the moment, but we may wish to revisit this. */
12047 if (intoffset % BITS_PER_WORD != 0)
12049 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12050 if (!int_mode_for_size (bits, 0).exists (&mode))
12052 /* We couldn't find an appropriate mode, which happens,
12053 e.g., in packed structs when there are 3 bytes to load.
12054 Move intoffset back to the beginning of the word in this
12055 case. */
12056 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12057 mode = word_mode;
12060 else
12061 mode = word_mode;
12063 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12064 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12065 intregs = (endbit - startbit) / BITS_PER_WORD;
12066 this_regno = cum->words + intoffset / BITS_PER_WORD;
12068 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12069 cum->use_stack = 1;
12071 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12072 if (intregs <= 0)
12073 return;
12075 intoffset /= BITS_PER_UNIT;
12076 do
12077 {
12078 regno = GP_ARG_MIN_REG + this_regno;
12079 reg = gen_rtx_REG (mode, regno);
12080 rvec[(*k)++] =
12081 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12083 this_regno += 1;
12084 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12085 mode = word_mode;
12086 intregs -= 1;
12087 }
12088 while (intregs > 0);
12091 /* Recursive workhorse for the following. */
12093 static void
12094 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12095 HOST_WIDE_INT startbitpos, rtx rvec[],
12096 int *k)
12098 tree f;
12100 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12101 if (TREE_CODE (f) == FIELD_DECL)
12103 HOST_WIDE_INT bitpos = startbitpos;
12104 tree ftype = TREE_TYPE (f);
12105 machine_mode mode;
12106 if (ftype == error_mark_node)
12107 continue;
12108 mode = TYPE_MODE (ftype);
12110 if (DECL_SIZE (f) != 0
12111 && tree_fits_uhwi_p (bit_position (f)))
12112 bitpos += int_bit_position (f);
12114 /* ??? FIXME: else assume zero offset. */
12116 if (TREE_CODE (ftype) == RECORD_TYPE)
12117 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12118 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12120 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12121 #if 0
12122 switch (mode)
12124 case E_SCmode: mode = SFmode; break;
12125 case E_DCmode: mode = DFmode; break;
12126 case E_TCmode: mode = TFmode; break;
12127 default: break;
12129 #endif
12130 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12131 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12133 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12134 && (mode == TFmode || mode == TDmode));
12135 /* Long double or _Decimal128 split over regs and memory. */
12136 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12137 cum->use_stack=1;
12139 rvec[(*k)++]
12140 = gen_rtx_EXPR_LIST (VOIDmode,
12141 gen_rtx_REG (mode, cum->fregno++),
12142 GEN_INT (bitpos / BITS_PER_UNIT));
12143 if (FLOAT128_2REG_P (mode))
12144 cum->fregno++;
12146 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12148 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12149 rvec[(*k)++]
12150 = gen_rtx_EXPR_LIST (VOIDmode,
12151 gen_rtx_REG (mode, cum->vregno++),
12152 GEN_INT (bitpos / BITS_PER_UNIT));
12154 else if (cum->intoffset == -1)
12155 cum->intoffset = bitpos;
12159 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12160 the register(s) to be used for each field and subfield of a struct
12161 being passed by value, along with the offset of where the
12162 register's value may be found in the block. FP fields go in FP
12163 register, vector fields go in vector registers, and everything
12164 else goes in int registers, packed as in memory.
12166 This code is also used for function return values. RETVAL indicates
12167 whether this is the case.
12169 Much of this is taken from the SPARC V9 port, which has a similar
12170 calling convention. */
12172 static rtx
12173 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12174 bool named, bool retval)
12176 rtx rvec[FIRST_PSEUDO_REGISTER];
12177 int k = 1, kbase = 1;
12178 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12179 /* This is a copy; modifications are not visible to our caller. */
12180 CUMULATIVE_ARGS copy_cum = *orig_cum;
12181 CUMULATIVE_ARGS *cum = &copy_cum;
12183 /* Pad to 16 byte boundary if needed. */
12184 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12185 && (cum->words % 2) != 0)
12186 cum->words++;
12188 cum->intoffset = 0;
12189 cum->use_stack = 0;
12190 cum->named = named;
12192 /* Put entries into rvec[] for individual FP and vector fields, and
12193 for the chunks of memory that go in int regs. Note we start at
12194 element 1; 0 is reserved for an indication of using memory, and
12195 may or may not be filled in below. */
12196 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12197 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12199 /* If any part of the struct went on the stack put all of it there.
12200 This hack is because the generic code for
12201 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12202 parts of the struct are not at the beginning. */
12203 if (cum->use_stack)
12205 if (retval)
12206 return NULL_RTX; /* doesn't go in registers at all */
12207 kbase = 0;
12208 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12210 if (k > 1 || cum->use_stack)
12211 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12212 else
12213 return NULL_RTX;
12216 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12218 static rtx
12219 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12220 int align_words)
12222 int n_units;
12223 int i, k;
12224 rtx rvec[GP_ARG_NUM_REG + 1];
12226 if (align_words >= GP_ARG_NUM_REG)
12227 return NULL_RTX;
12229 n_units = rs6000_arg_size (mode, type);
12231 /* Optimize the simple case where the arg fits in one gpr, except in
12232 the case of BLKmode due to assign_parms assuming that registers are
12233 BITS_PER_WORD wide. */
12234 if (n_units == 0
12235 || (n_units == 1 && mode != BLKmode))
12236 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12238 k = 0;
12239 if (align_words + n_units > GP_ARG_NUM_REG)
12240 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12241 using a magic NULL_RTX component.
12242 This is not strictly correct. Only some of the arg belongs in
12243 memory, not all of it. However, the normal scheme using
12244 function_arg_partial_nregs can result in unusual subregs, eg.
12245 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12246 store the whole arg to memory is often more efficient than code
12247 to store pieces, and we know that space is available in the right
12248 place for the whole arg. */
12249 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12251 i = 0;
12252 do
12253 {
12254 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12255 rtx off = GEN_INT (i++ * 4);
12256 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12257 }
12258 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12260 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12263 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12264 but must also be copied into the parameter save area starting at
12265 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12266 to the GPRs and/or memory. Return the number of elements used. */
12268 static int
12269 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12270 int align_words, rtx *rvec)
12272 int k = 0;
12274 if (align_words < GP_ARG_NUM_REG)
12276 int n_words = rs6000_arg_size (mode, type);
12278 if (align_words + n_words > GP_ARG_NUM_REG
12279 || mode == BLKmode
12280 || (TARGET_32BIT && TARGET_POWERPC64))
12282 /* If this is partially on the stack, then we only
12283 include the portion actually in registers here. */
12284 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12285 int i = 0;
12287 if (align_words + n_words > GP_ARG_NUM_REG)
12289 /* Not all of the arg fits in gprs. Say that it goes in memory
12290 too, using a magic NULL_RTX component. Also see comment in
12291 rs6000_mixed_function_arg for why the normal
12292 function_arg_partial_nregs scheme doesn't work in this case. */
12293 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12296 do
12297 {
12298 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12299 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12300 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12301 }
12302 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12304 else
12306 /* The whole arg fits in gprs. */
12307 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12308 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12311 else
12313 /* It's entirely in memory. */
12314 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12317 return k;
12320 /* RVEC is a vector of K components of an argument of mode MODE.
12321 Construct the final function_arg return value from it. */
12323 static rtx
12324 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12326 gcc_assert (k >= 1);
12328 /* Avoid returning a PARALLEL in the trivial cases. */
12329 if (k == 1)
12331 if (XEXP (rvec[0], 0) == NULL_RTX)
12332 return NULL_RTX;
12334 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12335 return XEXP (rvec[0], 0);
12338 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12341 /* Determine where to put an argument to a function.
12342 Value is zero to push the argument on the stack,
12343 or a hard register in which to store the argument.
12345 MODE is the argument's machine mode.
12346 TYPE is the data type of the argument (as a tree).
12347 This is null for libcalls where that information may
12348 not be available.
12349 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12350 the preceding args and about the function being called. It is
12351 not modified in this routine.
12352 NAMED is nonzero if this argument is a named parameter
12353 (otherwise it is an extra parameter matching an ellipsis).
12355 On RS/6000 the first eight words of non-FP are normally in registers
12356 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12357 Under V.4, the first 8 FP args are in registers.
12359 If this is floating-point and no prototype is specified, we use
12360 both an FP and integer register (or possibly FP reg and stack). Library
12361 functions (when CALL_LIBCALL is set) always have the proper types for args,
12362 so we can pass the FP value just in one register. emit_library_function
12363 doesn't support PARALLEL anyway.
12365 Note that for args passed by reference, function_arg will be called
12366 with MODE and TYPE set to that of the pointer to the arg, not the arg
12367 itself. */
12369 static rtx
12370 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12371 const_tree type, bool named)
12373 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12374 enum rs6000_abi abi = DEFAULT_ABI;
12375 machine_mode elt_mode;
12376 int n_elts;
12378 /* Return a marker to indicate whether CR1 needs to set or clear the
12379 bit that V.4 uses to say fp args were passed in registers.
12380 Assume that we don't need the marker for software floating point,
12381 or compiler generated library calls. */
12382 if (mode == VOIDmode)
12384 if (abi == ABI_V4
12385 && (cum->call_cookie & CALL_LIBCALL) == 0
12386 && (cum->stdarg
12387 || (cum->nargs_prototype < 0
12388 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12389 && TARGET_HARD_FLOAT)
12390 return GEN_INT (cum->call_cookie
12391 | ((cum->fregno == FP_ARG_MIN_REG)
12392 ? CALL_V4_SET_FP_ARGS
12393 : CALL_V4_CLEAR_FP_ARGS));
12395 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12398 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12400 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12402 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12403 if (rslt != NULL_RTX)
12404 return rslt;
12405 /* Else fall through to usual handling. */
12408 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12410 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12411 rtx r, off;
12412 int i, k = 0;
12414 /* Do we also need to pass this argument in the parameter save area?
12415 Library support functions for IEEE 128-bit are assumed to not need the
12416 value passed both in GPRs and in vector registers. */
12417 if (TARGET_64BIT && !cum->prototype
12418 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12420 int align_words = ROUND_UP (cum->words, 2);
12421 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12424 /* Describe where this argument goes in the vector registers. */
12425 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12427 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12428 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12429 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12432 return rs6000_finish_function_arg (mode, rvec, k);
12434 else if (TARGET_ALTIVEC_ABI
12435 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12436 || (type && TREE_CODE (type) == VECTOR_TYPE
12437 && int_size_in_bytes (type) == 16)))
12439 if (named || abi == ABI_V4)
12440 return NULL_RTX;
12441 else
12443 /* Vector parameters to varargs functions under AIX or Darwin
12444 get passed in memory and possibly also in GPRs. */
12445 int align, align_words, n_words;
12446 machine_mode part_mode;
12448 /* Vector parameters must be 16-byte aligned. In 32-bit
12449 mode this means we need to take into account the offset
12450 to the parameter save area. In 64-bit mode, they just
12451 have to start on an even word, since the parameter save
12452 area is 16-byte aligned. */
12453 if (TARGET_32BIT)
12454 align = -(rs6000_parm_offset () + cum->words) & 3;
12455 else
12456 align = cum->words & 1;
12457 align_words = cum->words + align;
12459 /* Out of registers? Memory, then. */
12460 if (align_words >= GP_ARG_NUM_REG)
12461 return NULL_RTX;
12463 if (TARGET_32BIT && TARGET_POWERPC64)
12464 return rs6000_mixed_function_arg (mode, type, align_words);
12466 /* The vector value goes in GPRs. Only the part of the
12467 value in GPRs is reported here. */
12468 part_mode = mode;
12469 n_words = rs6000_arg_size (mode, type);
12470 if (align_words + n_words > GP_ARG_NUM_REG)
12471 /* Fortunately, there are only two possibilities, the value
12472 is either wholly in GPRs or half in GPRs and half not. */
12473 part_mode = DImode;
12475 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12479 else if (abi == ABI_V4)
12481 if (abi_v4_pass_in_fpr (mode, named))
12483 /* _Decimal128 must use an even/odd register pair. This assumes
12484 that the register number is odd when fregno is odd. */
12485 if (mode == TDmode && (cum->fregno % 2) == 1)
12486 cum->fregno++;
12488 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12489 <= FP_ARG_V4_MAX_REG)
12490 return gen_rtx_REG (mode, cum->fregno);
12491 else
12492 return NULL_RTX;
12494 else
12496 int n_words = rs6000_arg_size (mode, type);
12497 int gregno = cum->sysv_gregno;
12499 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12500 As does any other 2 word item such as complex int due to a
12501 historical mistake. */
12502 if (n_words == 2)
12503 gregno += (1 - gregno) & 1;
12505 /* Multi-reg args are not split between registers and stack. */
12506 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12507 return NULL_RTX;
12509 if (TARGET_32BIT && TARGET_POWERPC64)
12510 return rs6000_mixed_function_arg (mode, type,
12511 gregno - GP_ARG_MIN_REG);
12512 return gen_rtx_REG (mode, gregno);
12515 else
12517 int align_words = rs6000_parm_start (mode, type, cum->words);
12519 /* _Decimal128 must be passed in an even/odd float register pair.
12520 This assumes that the register number is odd when fregno is odd. */
12521 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12522 cum->fregno++;
12524 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12526 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12527 rtx r, off;
12528 int i, k = 0;
12529 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12530 int fpr_words;
12532 /* Do we also need to pass this argument in the parameter
12533 save area? */
12534 if (type && (cum->nargs_prototype <= 0
12535 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12536 && TARGET_XL_COMPAT
12537 && align_words >= GP_ARG_NUM_REG)))
12538 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12540 /* Describe where this argument goes in the fprs. */
12541 for (i = 0; i < n_elts
12542 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12544 /* Check if the argument is split over registers and memory.
12545 This can only ever happen for long double or _Decimal128;
12546 complex types are handled via split_complex_arg. */
12547 machine_mode fmode = elt_mode;
12548 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12550 gcc_assert (FLOAT128_2REG_P (fmode));
12551 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12554 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12555 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12556 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12559 /* If there were not enough FPRs to hold the argument, the rest
12560 usually goes into memory. However, if the current position
12561 is still within the register parameter area, a portion may
12562 actually have to go into GPRs.
12564 Note that it may happen that the portion of the argument
12565 passed in the first "half" of the first GPR was already
12566 passed in the last FPR as well.
12568 For unnamed arguments, we already set up GPRs to cover the
12569 whole argument in rs6000_psave_function_arg, so there is
12570 nothing further to do at this point. */
12571 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12572 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12573 && cum->nargs_prototype > 0)
12575 static bool warned;
12577 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12578 int n_words = rs6000_arg_size (mode, type);
12580 align_words += fpr_words;
12581 n_words -= fpr_words;
12583 do
12584 {
12585 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12586 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12587 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12588 }
12589 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12591 if (!warned && warn_psabi)
12593 warned = true;
12594 inform (input_location,
12595 "the ABI of passing homogeneous float aggregates"
12596 " has changed in GCC 5");
12600 return rs6000_finish_function_arg (mode, rvec, k);
12602 else if (align_words < GP_ARG_NUM_REG)
12604 if (TARGET_32BIT && TARGET_POWERPC64)
12605 return rs6000_mixed_function_arg (mode, type, align_words);
12607 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12609 else
12610 return NULL_RTX;
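/* Editorial example of the PARALLEL built above: on 64-bit ELFv2,
   passing a hypothetical struct hfa { double a, b; } to a prototyped
   function as the first argument yields

     (parallel [(expr_list (reg:DF 33) (const_int 0))
                (expr_list (reg:DF 34) (const_int 8))])

   i.e. f1 and f2 with their byte offsets into the aggregate.  Without
   a prototype, rs6000_psave_function_arg adds GPR (or memory) copies
   of the same bytes, since the callee may look for them there.  */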
12614 /* For an arg passed partly in registers and partly in memory, this is
12615 the number of bytes passed in registers. For args passed entirely in
12616 registers or entirely in memory, zero. When an arg is described by a
12617 PARALLEL, perhaps using more than one register type, this function
12618 returns the number of bytes used by the first element of the PARALLEL. */
12620 static int
12621 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12622 tree type, bool named)
12624 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12625 bool passed_in_gprs = true;
12626 int ret = 0;
12627 int align_words;
12628 machine_mode elt_mode;
12629 int n_elts;
12631 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12633 if (DEFAULT_ABI == ABI_V4)
12634 return 0;
12636 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12638 /* If we are passing this arg in the fixed parameter save area (gprs or
12639 memory) as well as VRs, we do not use the partial bytes mechanism;
12640 instead, rs6000_function_arg will return a PARALLEL including a memory
12641 element as necessary. Library support functions for IEEE 128-bit are
12642 assumed to not need the value passed both in GPRs and in vector
12643 registers. */
12644 if (TARGET_64BIT && !cum->prototype
12645 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12646 return 0;
12648 /* Otherwise, we pass in VRs only. Check for partial copies. */
12649 passed_in_gprs = false;
12650 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12651 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12654 /* In this complicated case we just disable the partial_nregs code. */
12655 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12656 return 0;
12658 align_words = rs6000_parm_start (mode, type, cum->words);
12660 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12662 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12664 /* If we are passing this arg in the fixed parameter save area
12665 (gprs or memory) as well as FPRs, we do not use the partial
12666 bytes mechanism; instead, rs6000_function_arg will return a
12667 PARALLEL including a memory element as necessary. */
12668 if (type
12669 && (cum->nargs_prototype <= 0
12670 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12671 && TARGET_XL_COMPAT
12672 && align_words >= GP_ARG_NUM_REG)))
12673 return 0;
12675 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12676 passed_in_gprs = false;
12677 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12679 /* Compute number of bytes / words passed in FPRs. If there
12680 is still space available in the register parameter area
12681 *after* that amount, a part of the argument will be passed
12682 in GPRs. In that case, the total amount passed in any
12683 registers is equal to the amount that would have been passed
12684 in GPRs if everything were passed there, so we fall back to
12685 the GPR code below to compute the appropriate value. */
12686 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12687 * MIN (8, GET_MODE_SIZE (elt_mode)));
12688 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12690 if (align_words + fpr_words < GP_ARG_NUM_REG)
12691 passed_in_gprs = true;
12692 else
12693 ret = fpr;
12697 if (passed_in_gprs
12698 && align_words < GP_ARG_NUM_REG
12699 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12700 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12702 if (ret != 0 && TARGET_DEBUG_ARG)
12703 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12705 return ret;
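/* Editorial worked example, 64-bit ELFv2: a 32-byte struct of
   integers arriving after six doublewords of arguments.  align_words
   is 6, the struct needs 4 doublewords, and only r9/r10 remain:

     ret = (GP_ARG_NUM_REG - align_words) * 8 = (8 - 6) * 8 = 16

   so 16 bytes travel in GPRs and the remaining 16 in memory.  */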
12708 /* A C expression that indicates when an argument must be passed by
12709 reference. If nonzero for an argument, a copy of that argument is
12710 made in memory and a pointer to the argument is passed instead of
12711 the argument itself. The pointer is passed in whatever way is
12712 appropriate for passing a pointer to that type.
12714 Under V.4, aggregates and long double are passed by reference.
12716 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12717 reference unless the AltiVec vector extension ABI is in force.
12719 As an extension to all ABIs, variable sized types are passed by
12720 reference. */
12722 static bool
12723 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12724 machine_mode mode, const_tree type,
12725 bool named ATTRIBUTE_UNUSED)
12727 if (!type)
12728 return 0;
12730 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12731 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12733 if (TARGET_DEBUG_ARG)
12734 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12735 return 1;
12738 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12740 if (TARGET_DEBUG_ARG)
12741 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12742 return 1;
12745 if (int_size_in_bytes (type) < 0)
12747 if (TARGET_DEBUG_ARG)
12748 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12749 return 1;
12752 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12753 modes only exist for GCC vector types if -maltivec. */
12754 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12756 if (TARGET_DEBUG_ARG)
12757 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12758 return 1;
12761 /* Pass synthetic vectors in memory. */
12762 if (TREE_CODE (type) == VECTOR_TYPE
12763 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12765 static bool warned_for_pass_big_vectors = false;
12766 if (TARGET_DEBUG_ARG)
12767 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12768 if (!warned_for_pass_big_vectors)
12770 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12771 "non-standard ABI extension with no compatibility "
12772 "guarantee");
12773 warned_for_pass_big_vectors = true;
12775 return 1;
12778 return 0;
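/* Illustrative declarations, not from the GCC sources, that take the
   pass-by-reference path above under the 32-bit SVR4 (V.4) ABI:

       struct s { int a[4]; };
       void f (struct s x);             /* V4 aggregate */
       void g (__vector signed int x);  /* AltiVec vector, unless the
                                           AltiVec vector ABI is in
                                           force */

   In both cases the caller makes a copy in memory and passes its
   address in a single GPR, as an ordinary pointer. */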
12781 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12782 already processed. Return true if the parameter must be passed
12783 (fully or partially) on the stack. */
12785 static bool
12786 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12788 machine_mode mode;
12789 int unsignedp;
12790 rtx entry_parm;
12792 /* Catch errors. */
12793 if (type == NULL || type == error_mark_node)
12794 return true;
12796 /* Handle types with no storage requirement. */
12797 if (TYPE_MODE (type) == VOIDmode)
12798 return false;
12800 /* Handle complex types. */
12801 if (TREE_CODE (type) == COMPLEX_TYPE)
12802 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12803 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12805 /* Handle transparent aggregates. */
12806 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12807 && TYPE_TRANSPARENT_AGGR (type))
12808 type = TREE_TYPE (first_field (type));
12810 /* See if this arg was passed by invisible reference. */
12811 if (pass_by_reference (get_cumulative_args (args_so_far),
12812 TYPE_MODE (type), type, true))
12813 type = build_pointer_type (type);
12815 /* Find mode as it is passed by the ABI. */
12816 unsignedp = TYPE_UNSIGNED (type);
12817 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12819 /* If we must pass in stack, we need a stack. */
12820 if (rs6000_must_pass_in_stack (mode, type))
12821 return true;
12823 /* If there is no incoming register, we need a stack. */
12824 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12825 if (entry_parm == NULL)
12826 return true;
12828 /* Likewise if we need to pass both in registers and on the stack. */
12829 if (GET_CODE (entry_parm) == PARALLEL
12830 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12831 return true;
12833 /* Also true if we're partially in registers and partially not. */
12834 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12835 return true;
12837 /* Update info on where next arg arrives in registers. */
12838 rs6000_function_arg_advance (args_so_far, mode, type, true);
12839 return false;
12842 /* Return true if FUN has no prototype, has a variable argument
12843 list, or passes any parameter in memory. */
12845 static bool
12846 rs6000_function_parms_need_stack (tree fun, bool incoming)
12848 tree fntype, result;
12849 CUMULATIVE_ARGS args_so_far_v;
12850 cumulative_args_t args_so_far;
12852 if (!fun)
12853 /* Must be a libcall, all of which only use reg parms. */
12854 return false;
12856 fntype = fun;
12857 if (!TYPE_P (fun))
12858 fntype = TREE_TYPE (fun);
12860 /* Varargs functions need the parameter save area. */
12861 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12862 return true;
12864 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12865 args_so_far = pack_cumulative_args (&args_so_far_v);
12867 /* When incoming, we will have been passed the function decl.
12868 It is necessary to use the decl to handle K&R style functions,
12869 where TYPE_ARG_TYPES may not be available. */
12870 if (incoming)
12872 gcc_assert (DECL_P (fun));
12873 result = DECL_RESULT (fun);
12875 else
12876 result = TREE_TYPE (fntype);
12878 if (result && aggregate_value_p (result, fntype))
12880 if (!TYPE_P (result))
12881 result = TREE_TYPE (result);
12882 result = build_pointer_type (result);
12883 rs6000_parm_needs_stack (args_so_far, result);
12886 if (incoming)
12888 tree parm;
12890 for (parm = DECL_ARGUMENTS (fun);
12891 parm && parm != void_list_node;
12892 parm = TREE_CHAIN (parm))
12893 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12894 return true;
12896 else
12898 function_args_iterator args_iter;
12899 tree arg_type;
12901 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12902 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12903 return true;
12906 return false;
12909 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12910 usually a constant depending on the ABI. However, in the ELFv2 ABI
12911 the register parameter area is optional when calling a function that
12912 has a prototype in scope, has no variable argument list, and passes
12913 all parameters in registers. */
12915 int
12916 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12918 int reg_parm_stack_space;
12920 switch (DEFAULT_ABI)
12922 default:
12923 reg_parm_stack_space = 0;
12924 break;
12926 case ABI_AIX:
12927 case ABI_DARWIN:
12928 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12929 break;
12931 case ABI_ELFv2:
12932 /* ??? Recomputing this every time is a bit expensive. Is there
12933 a place to cache this information? */
12934 if (rs6000_function_parms_need_stack (fun, incoming))
12935 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12936 else
12937 reg_parm_stack_space = 0;
12938 break;
12941 return reg_parm_stack_space;
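/* A sketch of the ELFv2 special case above; the declaration is
   hypothetical, not from the GCC sources. For

       extern int dot (const double *a, const double *b, int n);

   every parameter lands in registers, so a call to dot() needs no
   parameter save area and this function returns 0. For a variadic
   callee such as printf(), or an unprototyped one, it returns the
   full 64 bytes so the 8 GPR argument words can be spilled. */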
12944 static void
12945 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12947 int i;
12948 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12950 if (nregs == 0)
12951 return;
12953 for (i = 0; i < nregs; i++)
12955 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12956 if (reload_completed)
12958 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12959 tem = NULL_RTX;
12960 else
12961 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12962 i * GET_MODE_SIZE (reg_mode));
12964 else
12965 tem = replace_equiv_address (tem, XEXP (tem, 0));
12967 gcc_assert (tem);
12969 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12973 /* Perform any actions needed for a function that is receiving a
12974 variable number of arguments.
12976 CUM is as above.
12978 MODE and TYPE are the mode and type of the current parameter.
12980 PRETEND_SIZE is a variable that should be set to the amount of stack
12981 that must be pushed by the prolog to pretend that our caller pushed
12982 it.
12984 Normally, this macro will push all remaining incoming registers on the
12985 stack and set PRETEND_SIZE to the length of the registers pushed. */
12987 static void
12988 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12989 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12990 int no_rtl)
12992 CUMULATIVE_ARGS next_cum;
12993 int reg_size = TARGET_32BIT ? 4 : 8;
12994 rtx save_area = NULL_RTX, mem;
12995 int first_reg_offset;
12996 alias_set_type set;
12998 /* Skip the last named argument. */
12999 next_cum = *get_cumulative_args (cum);
13000 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13002 if (DEFAULT_ABI == ABI_V4)
13004 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13006 if (! no_rtl)
13008 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13009 HOST_WIDE_INT offset = 0;
13011 /* Try to optimize the size of the varargs save area.
13012 The ABI requires that ap.reg_save_area is doubleword
13013 aligned, but we don't need to allocate space for all
13014 the bytes, only those into which we will actually save
13015 anything. */
13016 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13017 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13018 if (TARGET_HARD_FLOAT
13019 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13020 && cfun->va_list_fpr_size)
13022 if (gpr_reg_num)
13023 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13024 * UNITS_PER_FP_WORD;
13025 if (cfun->va_list_fpr_size
13026 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13027 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13028 else
13029 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13030 * UNITS_PER_FP_WORD;
13032 if (gpr_reg_num)
13034 offset = -((first_reg_offset * reg_size) & ~7);
13035 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13037 gpr_reg_num = cfun->va_list_gpr_size;
13038 if (reg_size == 4 && (first_reg_offset & 1))
13039 gpr_reg_num++;
13041 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13043 else if (fpr_size)
13044 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13045 * UNITS_PER_FP_WORD
13046 - (int) (GP_ARG_NUM_REG * reg_size);
13048 if (gpr_size + fpr_size)
13050 rtx reg_save_area
13051 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13052 gcc_assert (GET_CODE (reg_save_area) == MEM);
13053 reg_save_area = XEXP (reg_save_area, 0);
13054 if (GET_CODE (reg_save_area) == PLUS)
13056 gcc_assert (XEXP (reg_save_area, 0)
13057 == virtual_stack_vars_rtx);
13058 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13059 offset += INTVAL (XEXP (reg_save_area, 1));
13061 else
13062 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13065 cfun->machine->varargs_save_offset = offset;
13066 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13069 else
13071 first_reg_offset = next_cum.words;
13072 save_area = crtl->args.internal_arg_pointer;
13074 if (targetm.calls.must_pass_in_stack (mode, type))
13075 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13078 set = get_varargs_alias_set ();
13079 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13080 && cfun->va_list_gpr_size)
13082 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13084 if (va_list_gpr_counter_field)
13085 /* V4 va_list_gpr_size counts number of registers needed. */
13086 n_gpr = cfun->va_list_gpr_size;
13087 else
13088 /* char * va_list instead counts number of bytes needed. */
13089 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13091 if (nregs > n_gpr)
13092 nregs = n_gpr;
13094 mem = gen_rtx_MEM (BLKmode,
13095 plus_constant (Pmode, save_area,
13096 first_reg_offset * reg_size));
13097 MEM_NOTRAP_P (mem) = 1;
13098 set_mem_alias_set (mem, set);
13099 set_mem_align (mem, BITS_PER_WORD);
13101 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13102 nregs);
13105 /* Save FP registers if needed. */
13106 if (DEFAULT_ABI == ABI_V4
13107 && TARGET_HARD_FLOAT
13108 && ! no_rtl
13109 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13110 && cfun->va_list_fpr_size)
13112 int fregno = next_cum.fregno, nregs;
13113 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13114 rtx lab = gen_label_rtx ();
13115 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13116 * UNITS_PER_FP_WORD);
13118 emit_jump_insn
13119 (gen_rtx_SET (pc_rtx,
13120 gen_rtx_IF_THEN_ELSE (VOIDmode,
13121 gen_rtx_NE (VOIDmode, cr1,
13122 const0_rtx),
13123 gen_rtx_LABEL_REF (VOIDmode, lab),
13124 pc_rtx)));
13126 for (nregs = 0;
13127 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13128 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13130 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13131 ? DFmode : SFmode,
13132 plus_constant (Pmode, save_area, off));
13133 MEM_NOTRAP_P (mem) = 1;
13134 set_mem_alias_set (mem, set);
13135 set_mem_align (mem, GET_MODE_ALIGNMENT (
13136 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13137 ? DFmode : SFmode));
13138 emit_move_insn (mem, gen_rtx_REG (
13139 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13140 ? DFmode : SFmode, fregno));
13143 emit_label (lab);
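/* A sketch of the V.4 save-area layout produced above (offsets follow
   from the code; the summary itself is not in the GCC sources). The
   register save area holds the 8 argument GPRs (r3..r10, 4 bytes
   each) followed by the 8 argument FPRs (f1..f8, 8 bytes each):

       reg_save_area + 0  : r3 .. r10 (32 bytes, gpr_size)
       reg_save_area + 32 : f1 .. f8  (64 bytes, fpr_size)

   The FPR stores are guarded at run time by the branch on CR1, per
   the SVR4 convention that the caller flags whether any FP argument
   registers were used. */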
13147 /* Create the va_list data type. */
13149 static tree
13150 rs6000_build_builtin_va_list (void)
13152 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13154 /* For AIX, prefer 'char *' because that's what the system
13155 header files like. */
13156 if (DEFAULT_ABI != ABI_V4)
13157 return build_pointer_type (char_type_node);
13159 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13160 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13161 get_identifier ("__va_list_tag"), record);
13163 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13164 unsigned_char_type_node);
13165 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13166 unsigned_char_type_node);
13167 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13168 every user file. */
13169 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13170 get_identifier ("reserved"), short_unsigned_type_node);
13171 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13172 get_identifier ("overflow_arg_area"),
13173 ptr_type_node);
13174 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13175 get_identifier ("reg_save_area"),
13176 ptr_type_node);
13178 va_list_gpr_counter_field = f_gpr;
13179 va_list_fpr_counter_field = f_fpr;
13181 DECL_FIELD_CONTEXT (f_gpr) = record;
13182 DECL_FIELD_CONTEXT (f_fpr) = record;
13183 DECL_FIELD_CONTEXT (f_res) = record;
13184 DECL_FIELD_CONTEXT (f_ovf) = record;
13185 DECL_FIELD_CONTEXT (f_sav) = record;
13187 TYPE_STUB_DECL (record) = type_decl;
13188 TYPE_NAME (record) = type_decl;
13189 TYPE_FIELDS (record) = f_gpr;
13190 DECL_CHAIN (f_gpr) = f_fpr;
13191 DECL_CHAIN (f_fpr) = f_res;
13192 DECL_CHAIN (f_res) = f_ovf;
13193 DECL_CHAIN (f_ovf) = f_sav;
13195 layout_type (record);
13197 /* The correct type is an array type of one element. */
13198 return build_array_type (record, build_index_type (size_zero_node));
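/* For reference, a sketch (not from the GCC sources) of the record
   built above; it is the familiar SVR4 PowerPC va_list, equivalent to:

       typedef struct __va_list_tag {
         unsigned char gpr;        /* next GPR index, 0..8 */
         unsigned char fpr;        /* next FPR index, 0..8 */
         unsigned short reserved;  /* named only to placate -Wpadded */
         void *overflow_arg_area;  /* next stack argument */
         void *reg_save_area;      /* base of saved r3..r10 / f1..f8 */
       } va_list[1];               /* array type of one element */
*/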
13201 /* Implement va_start. */
13203 static void
13204 rs6000_va_start (tree valist, rtx nextarg)
13206 HOST_WIDE_INT words, n_gpr, n_fpr;
13207 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13208 tree gpr, fpr, ovf, sav, t;
13210 /* Only SVR4 needs something special. */
13211 if (DEFAULT_ABI != ABI_V4)
13213 std_expand_builtin_va_start (valist, nextarg);
13214 return;
13217 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13218 f_fpr = DECL_CHAIN (f_gpr);
13219 f_res = DECL_CHAIN (f_fpr);
13220 f_ovf = DECL_CHAIN (f_res);
13221 f_sav = DECL_CHAIN (f_ovf);
13223 valist = build_simple_mem_ref (valist);
13224 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13225 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13226 f_fpr, NULL_TREE);
13227 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13228 f_ovf, NULL_TREE);
13229 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13230 f_sav, NULL_TREE);
13232 /* Count number of gp and fp argument registers used. */
13233 words = crtl->args.info.words;
13234 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13235 GP_ARG_NUM_REG);
13236 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13237 FP_ARG_NUM_REG);
13239 if (TARGET_DEBUG_ARG)
13240 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13241 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13242 words, n_gpr, n_fpr);
13244 if (cfun->va_list_gpr_size)
13246 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13247 build_int_cst (NULL_TREE, n_gpr));
13248 TREE_SIDE_EFFECTS (t) = 1;
13249 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13252 if (cfun->va_list_fpr_size)
13254 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13255 build_int_cst (NULL_TREE, n_fpr));
13256 TREE_SIDE_EFFECTS (t) = 1;
13257 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13259 #ifdef HAVE_AS_GNU_ATTRIBUTE
13260 if (call_ABI_of_interest (cfun->decl))
13261 rs6000_passes_float = true;
13262 #endif
13265 /* Find the overflow area. */
13266 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13267 if (words != 0)
13268 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13269 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13270 TREE_SIDE_EFFECTS (t) = 1;
13271 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13273 /* If there were no va_arg invocations, don't set up the register
13274 save area. */
13275 if (!cfun->va_list_gpr_size
13276 && !cfun->va_list_fpr_size
13277 && n_gpr < GP_ARG_NUM_REG
13278 && n_fpr < FP_ARG_V4_MAX_REG)
13279 return;
13281 /* Find the register save area. */
13282 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13283 if (cfun->machine->varargs_save_offset)
13284 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13285 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13286 TREE_SIDE_EFFECTS (t) = 1;
13287 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
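/* A worked example for the V.4 path above; the function is
   hypothetical. For

       void log_ints (const char *fmt, ...)

   the named argument consumes r3 only, so va_start stores gpr = 1 and
   fpr = 0, points overflow_arg_area at the incoming stack arguments,
   and points reg_save_area at the block spilled by
   setup_incoming_varargs. */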
13290 /* Implement va_arg. */
13292 static tree
13293 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13294 gimple_seq *post_p)
13296 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13297 tree gpr, fpr, ovf, sav, reg, t, u;
13298 int size, rsize, n_reg, sav_ofs, sav_scale;
13299 tree lab_false, lab_over, addr;
13300 int align;
13301 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13302 int regalign = 0;
13303 gimple *stmt;
13305 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13307 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13308 return build_va_arg_indirect_ref (t);
13311 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13312 earlier version of gcc, with the property that it always applied alignment
13313 adjustments to the va-args (even for zero-sized types). The cheapest way
13314 to deal with this is to replicate the effect of the part of
13315 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13316 of relevance.
13317 We don't need to check for pass-by-reference because of the test above.
13318 We can return a simplified answer, since we know there's no offset to add. */
13320 if (((TARGET_MACHO
13321 && rs6000_darwin64_abi)
13322 || DEFAULT_ABI == ABI_ELFv2
13323 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13324 && integer_zerop (TYPE_SIZE (type)))
13326 unsigned HOST_WIDE_INT align, boundary;
13327 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13328 align = PARM_BOUNDARY / BITS_PER_UNIT;
13329 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13330 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13331 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13332 boundary /= BITS_PER_UNIT;
13333 if (boundary > align)
13335 tree t;
13336 /* This updates arg ptr by the amount that would be necessary
13337 to align the zero-sized (but not zero-alignment) item. */
13338 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13339 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13340 gimplify_and_add (t, pre_p);
13342 t = fold_convert (sizetype, valist_tmp);
13343 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13344 fold_convert (TREE_TYPE (valist),
13345 fold_build2 (BIT_AND_EXPR, sizetype, t,
13346 size_int (-boundary))));
13347 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13348 gimplify_and_add (t, pre_p);
13350 /* Since it is zero-sized there's no increment for the item itself. */
13351 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13352 return build_va_arg_indirect_ref (valist_tmp);
13355 if (DEFAULT_ABI != ABI_V4)
13357 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13359 tree elem_type = TREE_TYPE (type);
13360 machine_mode elem_mode = TYPE_MODE (elem_type);
13361 int elem_size = GET_MODE_SIZE (elem_mode);
13363 if (elem_size < UNITS_PER_WORD)
13365 tree real_part, imag_part;
13366 gimple_seq post = NULL;
13368 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13369 &post);
13370 /* Copy the value into a temporary, lest the formal temporary
13371 be reused out from under us. */
13372 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13373 gimple_seq_add_seq (pre_p, post);
13375 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13376 post_p);
13378 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13382 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13385 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13386 f_fpr = DECL_CHAIN (f_gpr);
13387 f_res = DECL_CHAIN (f_fpr);
13388 f_ovf = DECL_CHAIN (f_res);
13389 f_sav = DECL_CHAIN (f_ovf);
13391 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13392 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13393 f_fpr, NULL_TREE);
13394 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13395 f_ovf, NULL_TREE);
13396 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13397 f_sav, NULL_TREE);
13399 size = int_size_in_bytes (type);
13400 rsize = (size + 3) / 4;
13401 int pad = 4 * rsize - size;
13402 align = 1;
13404 machine_mode mode = TYPE_MODE (type);
13405 if (abi_v4_pass_in_fpr (mode, false))
13407 /* FP args go in FP registers, if present. */
13408 reg = fpr;
13409 n_reg = (size + 7) / 8;
13410 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13411 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13412 if (mode != SFmode && mode != SDmode)
13413 align = 8;
13415 else
13417 /* Otherwise into GP registers. */
13418 reg = gpr;
13419 n_reg = rsize;
13420 sav_ofs = 0;
13421 sav_scale = 4;
13422 if (n_reg == 2)
13423 align = 8;
13426 /* Pull the value out of the saved registers.... */
13428 lab_over = NULL;
13429 addr = create_tmp_var (ptr_type_node, "addr");
13431 /* AltiVec vectors never go in registers when -mabi=altivec. */
13432 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13433 align = 16;
13434 else
13436 lab_false = create_artificial_label (input_location);
13437 lab_over = create_artificial_label (input_location);
13439 /* Long long is aligned in the registers. So are any other
13440 two-GPR items, such as complex int, due to a historical mistake. */
13441 u = reg;
13442 if (n_reg == 2 && reg == gpr)
13444 regalign = 1;
13445 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13446 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13447 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13448 unshare_expr (reg), u);
13450 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13451 reg number is 0 for f1, so we want to make it odd. */
13452 else if (reg == fpr && mode == TDmode)
13454 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13455 build_int_cst (TREE_TYPE (reg), 1));
13456 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13459 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13460 t = build2 (GE_EXPR, boolean_type_node, u, t);
13461 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13462 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13463 gimplify_and_add (t, pre_p);
13465 t = sav;
13466 if (sav_ofs)
13467 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13469 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13470 build_int_cst (TREE_TYPE (reg), n_reg));
13471 u = fold_convert (sizetype, u);
13472 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13473 t = fold_build_pointer_plus (t, u);
13475 /* _Decimal32 varargs are located in the second word of the 64-bit
13476 FP register for 32-bit binaries. */
13477 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13478 t = fold_build_pointer_plus_hwi (t, size);
13480 /* Args are passed right-aligned. */
13481 if (BYTES_BIG_ENDIAN)
13482 t = fold_build_pointer_plus_hwi (t, pad);
13484 gimplify_assign (addr, t, pre_p);
13486 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13488 stmt = gimple_build_label (lab_false);
13489 gimple_seq_add_stmt (pre_p, stmt);
13491 if ((n_reg == 2 && !regalign) || n_reg > 2)
13493 /* Ensure that we don't find any more args in regs.
13494 Alignment has been taken care of for the special cases. */
13495 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13499 /* ... otherwise out of the overflow area. */
13501 /* Care for on-stack alignment if needed. */
13502 t = ovf;
13503 if (align != 1)
13505 t = fold_build_pointer_plus_hwi (t, align - 1);
13506 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13507 build_int_cst (TREE_TYPE (t), -align));
13510 /* Args are passed right-aligned. */
13511 if (BYTES_BIG_ENDIAN)
13512 t = fold_build_pointer_plus_hwi (t, pad);
13514 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13516 gimplify_assign (unshare_expr (addr), t, pre_p);
13518 t = fold_build_pointer_plus_hwi (t, size);
13519 gimplify_assign (unshare_expr (ovf), t, pre_p);
13521 if (lab_over)
13523 stmt = gimple_build_label (lab_over);
13524 gimple_seq_add_stmt (pre_p, stmt);
13527 if (STRICT_ALIGNMENT
13528 && (TYPE_ALIGN (type)
13529 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13531 /* The value (of type complex double, for example) may not be
13532 aligned in memory in the saved registers, so copy via a
13533 temporary. (This is the same code as used for SPARC.) */
13534 tree tmp = create_tmp_var (type, "va_arg_tmp");
13535 tree dest_addr = build_fold_addr_expr (tmp);
13537 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13538 3, dest_addr, addr, size_int (rsize * 4));
13539 TREE_ADDRESSABLE (tmp) = 1;
13541 gimplify_and_add (copy, pre_p);
13542 addr = dest_addr;
13545 addr = fold_convert (ptrtype, addr);
13546 return build_va_arg_indirect_ref (addr);
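/* A worked example for the V.4 va_arg path above, not from the GCC
   sources. Fetching a double uses n_reg = 1, sav_ofs = 32 and
   sav_scale = 8, so while fpr < 8 the value is loaded from

       reg_save_area + 32 + fpr++ * 8

   and once the eight FPRs are exhausted control falls through to the
   overflow area, rounded up to 8-byte alignment, exactly as the
   gimple built above encodes. */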
13549 /* Builtins. */
13551 static void
13552 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13554 tree t;
13555 unsigned classify = rs6000_builtin_info[(int)code].attr;
13556 const char *attr_string = "";
13558 gcc_assert (name != NULL);
13559 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13561 if (rs6000_builtin_decls[(int)code])
13562 fatal_error (input_location,
13563 "internal error: builtin function %qs already processed",
13564 name);
13566 rs6000_builtin_decls[(int)code] = t =
13567 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13569 /* Set any special attributes. */
13570 if ((classify & RS6000_BTC_CONST) != 0)
13572 /* const function, function only depends on the inputs. */
13573 TREE_READONLY (t) = 1;
13574 TREE_NOTHROW (t) = 1;
13575 attr_string = ", const";
13577 else if ((classify & RS6000_BTC_PURE) != 0)
13579 /* pure function, function can read global memory, but does not set any
13580 external state. */
13581 DECL_PURE_P (t) = 1;
13582 TREE_NOTHROW (t) = 1;
13583 attr_string = ", pure";
13585 else if ((classify & RS6000_BTC_FP) != 0)
13587 /* Function is a math function. If rounding mode is on, then treat the
13588 function as not reading global memory, but it can have arbitrary side
13589 effects. If it is off, then assume the function is a const function.
13590 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13591 builtin-attribute.def that is used for the math functions. */
13592 TREE_NOTHROW (t) = 1;
13593 if (flag_rounding_math)
13595 DECL_PURE_P (t) = 1;
13596 DECL_IS_NOVOPS (t) = 1;
13597 attr_string = ", fp, pure";
13599 else
13601 TREE_READONLY (t) = 1;
13602 attr_string = ", fp, const";
13605 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13606 gcc_unreachable ();
13608 if (TARGET_DEBUG_BUILTIN)
13609 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13610 (int)code, name, attr_string);
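/* A sketch of how def_builtin is used; the type node name here is
   illustrative (real calls appear later in this file):

       def_builtin ("__builtin_altivec_vmaxsw",
                    v4si_ftype_v4si_v4si, ALTIVEC_BUILTIN_VMAXSW);

   This registers the function with the middle end, records its decl
   in rs6000_builtin_decls, and tags it const/pure/fp from the
   attribute bits in rs6000_builtin_info. */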
13613 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13615 #undef RS6000_BUILTIN_0
13616 #undef RS6000_BUILTIN_1
13617 #undef RS6000_BUILTIN_2
13618 #undef RS6000_BUILTIN_3
13619 #undef RS6000_BUILTIN_A
13620 #undef RS6000_BUILTIN_D
13621 #undef RS6000_BUILTIN_H
13622 #undef RS6000_BUILTIN_P
13623 #undef RS6000_BUILTIN_Q
13624 #undef RS6000_BUILTIN_X
13626 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13627 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13628 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13629 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13630 { MASK, ICODE, NAME, ENUM },
13632 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13633 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13634 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13635 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13636 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13637 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13639 static const struct builtin_description bdesc_3arg[] =
13641 #include "rs6000-builtin.def"
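/* The tables in this section use the "X macro" idiom:
   rs6000-builtin.def invokes one RS6000_BUILTIN_<class> macro per
   builtin, and before each #include exactly one of those macros is
   defined to emit an initializer while the rest expand to nothing, so
   each table collects just one class. A minimal sketch of the idiom,
   with hypothetical names:

       #define DEF_COLOR(name, hex) { #name, hex },
       static const struct color colors[] = {
       #include "colors.def"
       };
       #undef DEF_COLOR
*/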
13644 /* DST operations: void foo (void *, const int, const char). */
13646 #undef RS6000_BUILTIN_0
13647 #undef RS6000_BUILTIN_1
13648 #undef RS6000_BUILTIN_2
13649 #undef RS6000_BUILTIN_3
13650 #undef RS6000_BUILTIN_A
13651 #undef RS6000_BUILTIN_D
13652 #undef RS6000_BUILTIN_H
13653 #undef RS6000_BUILTIN_P
13654 #undef RS6000_BUILTIN_Q
13655 #undef RS6000_BUILTIN_X
13657 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13658 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13659 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13660 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13661 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13663 { MASK, ICODE, NAME, ENUM },
13665 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13666 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13667 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13668 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13670 static const struct builtin_description bdesc_dst[] =
13672 #include "rs6000-builtin.def"
13675 /* Simple binary operations: VECc = foo (VECa, VECb). */
13677 #undef RS6000_BUILTIN_0
13678 #undef RS6000_BUILTIN_1
13679 #undef RS6000_BUILTIN_2
13680 #undef RS6000_BUILTIN_3
13681 #undef RS6000_BUILTIN_A
13682 #undef RS6000_BUILTIN_D
13683 #undef RS6000_BUILTIN_H
13684 #undef RS6000_BUILTIN_P
13685 #undef RS6000_BUILTIN_Q
13686 #undef RS6000_BUILTIN_X
13688 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13689 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13690 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13691 { MASK, ICODE, NAME, ENUM },
13693 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13695 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13696 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13697 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13698 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13699 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13701 static const struct builtin_description bdesc_2arg[] =
13703 #include "rs6000-builtin.def"
13706 #undef RS6000_BUILTIN_0
13707 #undef RS6000_BUILTIN_1
13708 #undef RS6000_BUILTIN_2
13709 #undef RS6000_BUILTIN_3
13710 #undef RS6000_BUILTIN_A
13711 #undef RS6000_BUILTIN_D
13712 #undef RS6000_BUILTIN_H
13713 #undef RS6000_BUILTIN_P
13714 #undef RS6000_BUILTIN_Q
13715 #undef RS6000_BUILTIN_X
13717 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13718 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13719 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13720 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13721 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13722 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13725 { MASK, ICODE, NAME, ENUM },
13727 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13728 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13730 /* AltiVec predicates. */
13732 static const struct builtin_description bdesc_altivec_preds[] =
13734 #include "rs6000-builtin.def"
13737 /* PAIRED predicates. */
13738 #undef RS6000_BUILTIN_0
13739 #undef RS6000_BUILTIN_1
13740 #undef RS6000_BUILTIN_2
13741 #undef RS6000_BUILTIN_3
13742 #undef RS6000_BUILTIN_A
13743 #undef RS6000_BUILTIN_D
13744 #undef RS6000_BUILTIN_H
13745 #undef RS6000_BUILTIN_P
13746 #undef RS6000_BUILTIN_Q
13747 #undef RS6000_BUILTIN_X
13749 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13750 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13751 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13752 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13753 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13754 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13756 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13757 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13758 { MASK, ICODE, NAME, ENUM },
13760 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13762 static const struct builtin_description bdesc_paired_preds[] =
13764 #include "rs6000-builtin.def"
13767 /* ABS* operations. */
13769 #undef RS6000_BUILTIN_0
13770 #undef RS6000_BUILTIN_1
13771 #undef RS6000_BUILTIN_2
13772 #undef RS6000_BUILTIN_3
13773 #undef RS6000_BUILTIN_A
13774 #undef RS6000_BUILTIN_D
13775 #undef RS6000_BUILTIN_H
13776 #undef RS6000_BUILTIN_P
13777 #undef RS6000_BUILTIN_Q
13778 #undef RS6000_BUILTIN_X
13780 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13781 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13782 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13783 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13784 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13785 { MASK, ICODE, NAME, ENUM },
13787 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13788 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13789 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13790 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13793 static const struct builtin_description bdesc_abs[] =
13795 #include "rs6000-builtin.def"
13798 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13799 foo (VECa). */
13801 #undef RS6000_BUILTIN_0
13802 #undef RS6000_BUILTIN_1
13803 #undef RS6000_BUILTIN_2
13804 #undef RS6000_BUILTIN_3
13805 #undef RS6000_BUILTIN_A
13806 #undef RS6000_BUILTIN_D
13807 #undef RS6000_BUILTIN_H
13808 #undef RS6000_BUILTIN_P
13809 #undef RS6000_BUILTIN_Q
13810 #undef RS6000_BUILTIN_X
13812 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13813 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13814 { MASK, ICODE, NAME, ENUM },
13816 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13817 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13818 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13819 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13821 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13822 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13823 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13825 static const struct builtin_description bdesc_1arg[] =
13827 #include "rs6000-builtin.def"
13830 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13832 #undef RS6000_BUILTIN_0
13833 #undef RS6000_BUILTIN_1
13834 #undef RS6000_BUILTIN_2
13835 #undef RS6000_BUILTIN_3
13836 #undef RS6000_BUILTIN_A
13837 #undef RS6000_BUILTIN_D
13838 #undef RS6000_BUILTIN_H
13839 #undef RS6000_BUILTIN_P
13840 #undef RS6000_BUILTIN_Q
13841 #undef RS6000_BUILTIN_X
13843 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13844 { MASK, ICODE, NAME, ENUM },
13846 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13847 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13848 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13851 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13852 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13853 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13854 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13856 static const struct builtin_description bdesc_0arg[] =
13858 #include "rs6000-builtin.def"
13861 /* HTM builtins. */
13862 #undef RS6000_BUILTIN_0
13863 #undef RS6000_BUILTIN_1
13864 #undef RS6000_BUILTIN_2
13865 #undef RS6000_BUILTIN_3
13866 #undef RS6000_BUILTIN_A
13867 #undef RS6000_BUILTIN_D
13868 #undef RS6000_BUILTIN_H
13869 #undef RS6000_BUILTIN_P
13870 #undef RS6000_BUILTIN_Q
13871 #undef RS6000_BUILTIN_X
13873 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13874 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13875 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13876 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13877 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13878 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13879 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13880 { MASK, ICODE, NAME, ENUM },
13882 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13883 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13884 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13886 static const struct builtin_description bdesc_htm[] =
13888 #include "rs6000-builtin.def"
13891 #undef RS6000_BUILTIN_0
13892 #undef RS6000_BUILTIN_1
13893 #undef RS6000_BUILTIN_2
13894 #undef RS6000_BUILTIN_3
13895 #undef RS6000_BUILTIN_A
13896 #undef RS6000_BUILTIN_D
13897 #undef RS6000_BUILTIN_H
13898 #undef RS6000_BUILTIN_P
13899 #undef RS6000_BUILTIN_Q
13900 #undef RS6000_BUILTIN_X
13901 /* Return true if a builtin function is overloaded. */
13902 bool
13903 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13905 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13908 const char *
13909 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13911 return rs6000_builtin_info[(int)fncode].name;
13914 /* Expand an expression EXP that calls a builtin without arguments. */
13915 static rtx
13916 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13918 rtx pat;
13919 machine_mode tmode = insn_data[icode].operand[0].mode;
13921 if (icode == CODE_FOR_nothing)
13922 /* Builtin not supported on this processor. */
13923 return 0;
13925 if (target == 0
13926 || GET_MODE (target) != tmode
13927 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13928 target = gen_reg_rtx (tmode);
13930 pat = GEN_FCN (icode) (target);
13931 if (! pat)
13932 return 0;
13933 emit_insn (pat);
13935 return target;
13939 static rtx
13940 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13942 rtx pat;
13943 tree arg0 = CALL_EXPR_ARG (exp, 0);
13944 tree arg1 = CALL_EXPR_ARG (exp, 1);
13945 rtx op0 = expand_normal (arg0);
13946 rtx op1 = expand_normal (arg1);
13947 machine_mode mode0 = insn_data[icode].operand[0].mode;
13948 machine_mode mode1 = insn_data[icode].operand[1].mode;
13950 if (icode == CODE_FOR_nothing)
13951 /* Builtin not supported on this processor. */
13952 return 0;
13954 /* If we got invalid arguments bail out before generating bad rtl. */
13955 if (arg0 == error_mark_node || arg1 == error_mark_node)
13956 return const0_rtx;
13958 if (GET_CODE (op0) != CONST_INT
13959 || INTVAL (op0) > 255
13960 || INTVAL (op0) < 0)
13962 error ("argument 1 must be an 8-bit field value");
13963 return const0_rtx;
13966 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13967 op0 = copy_to_mode_reg (mode0, op0);
13969 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13970 op1 = copy_to_mode_reg (mode1, op1);
13972 pat = GEN_FCN (icode) (op0, op1);
13973 if (! pat)
13974 return const0_rtx;
13975 emit_insn (pat);
13977 return NULL_RTX;
13980 static rtx
13981 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13983 rtx pat;
13984 tree arg0 = CALL_EXPR_ARG (exp, 0);
13985 rtx op0 = expand_normal (arg0);
13986 machine_mode tmode = insn_data[icode].operand[0].mode;
13987 machine_mode mode0 = insn_data[icode].operand[1].mode;
13989 if (icode == CODE_FOR_nothing)
13990 /* Builtin not supported on this processor. */
13991 return 0;
13993 /* If we got invalid arguments bail out before generating bad rtl. */
13994 if (arg0 == error_mark_node)
13995 return const0_rtx;
13997 if (icode == CODE_FOR_altivec_vspltisb
13998 || icode == CODE_FOR_altivec_vspltish
13999 || icode == CODE_FOR_altivec_vspltisw)
14001 /* Only allow 5-bit *signed* literals. */
14002 if (GET_CODE (op0) != CONST_INT
14003 || INTVAL (op0) > 15
14004 || INTVAL (op0) < -16)
14006 error ("argument 1 must be a 5-bit signed literal");
14007 return CONST0_RTX (tmode);
14011 if (target == 0
14012 || GET_MODE (target) != tmode
14013 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14014 target = gen_reg_rtx (tmode);
14016 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14017 op0 = copy_to_mode_reg (mode0, op0);
14019 pat = GEN_FCN (icode) (target, op0);
14020 if (! pat)
14021 return 0;
14022 emit_insn (pat);
14024 return target;
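/* An illustrative use of the 5-bit signed literal check above; the
   calls are examples, not from the GCC sources:

       __builtin_altivec_vspltisw (5);   // OK: -16 <= 5 <= 15
       __builtin_altivec_vspltisw (99);  // rejected at expand time

   The operand must be a compile-time constant because vspltisw
   encodes it in a 5-bit immediate field of the instruction. */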
14027 static rtx
14028 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14030 rtx pat, scratch1, scratch2;
14031 tree arg0 = CALL_EXPR_ARG (exp, 0);
14032 rtx op0 = expand_normal (arg0);
14033 machine_mode tmode = insn_data[icode].operand[0].mode;
14034 machine_mode mode0 = insn_data[icode].operand[1].mode;
14036 /* If we have invalid arguments, bail out before generating bad rtl. */
14037 if (arg0 == error_mark_node)
14038 return const0_rtx;
14040 if (target == 0
14041 || GET_MODE (target) != tmode
14042 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14043 target = gen_reg_rtx (tmode);
14045 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14046 op0 = copy_to_mode_reg (mode0, op0);
14048 scratch1 = gen_reg_rtx (mode0);
14049 scratch2 = gen_reg_rtx (mode0);
14051 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14052 if (! pat)
14053 return 0;
14054 emit_insn (pat);
14056 return target;
14059 static rtx
14060 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14062 rtx pat;
14063 tree arg0 = CALL_EXPR_ARG (exp, 0);
14064 tree arg1 = CALL_EXPR_ARG (exp, 1);
14065 rtx op0 = expand_normal (arg0);
14066 rtx op1 = expand_normal (arg1);
14067 machine_mode tmode = insn_data[icode].operand[0].mode;
14068 machine_mode mode0 = insn_data[icode].operand[1].mode;
14069 machine_mode mode1 = insn_data[icode].operand[2].mode;
14071 if (icode == CODE_FOR_nothing)
14072 /* Builtin not supported on this processor. */
14073 return 0;
14075 /* If we got invalid arguments bail out before generating bad rtl. */
14076 if (arg0 == error_mark_node || arg1 == error_mark_node)
14077 return const0_rtx;
14079 if (icode == CODE_FOR_altivec_vcfux
14080 || icode == CODE_FOR_altivec_vcfsx
14081 || icode == CODE_FOR_altivec_vctsxs
14082 || icode == CODE_FOR_altivec_vctuxs
14083 || icode == CODE_FOR_altivec_vspltb
14084 || icode == CODE_FOR_altivec_vsplth
14085 || icode == CODE_FOR_altivec_vspltw)
14087 /* Only allow 5-bit unsigned literals. */
14088 STRIP_NOPS (arg1);
14089 if (TREE_CODE (arg1) != INTEGER_CST
14090 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14092 error ("argument 2 must be a 5-bit unsigned literal");
14093 return CONST0_RTX (tmode);
14096 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14097 || icode == CODE_FOR_dfptstsfi_lt_dd
14098 || icode == CODE_FOR_dfptstsfi_gt_dd
14099 || icode == CODE_FOR_dfptstsfi_unordered_dd
14100 || icode == CODE_FOR_dfptstsfi_eq_td
14101 || icode == CODE_FOR_dfptstsfi_lt_td
14102 || icode == CODE_FOR_dfptstsfi_gt_td
14103 || icode == CODE_FOR_dfptstsfi_unordered_td)
14105 /* Only allow 6-bit unsigned literals. */
14106 STRIP_NOPS (arg0);
14107 if (TREE_CODE (arg0) != INTEGER_CST
14108 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14110 error ("argument 1 must be a 6-bit unsigned literal");
14111 return CONST0_RTX (tmode);
14114 else if (icode == CODE_FOR_xststdcqp_kf
14115 || icode == CODE_FOR_xststdcqp_tf
14116 || icode == CODE_FOR_xststdcdp
14117 || icode == CODE_FOR_xststdcsp
14118 || icode == CODE_FOR_xvtstdcdp
14119 || icode == CODE_FOR_xvtstdcsp)
14121 /* Only allow 7-bit unsigned literals. */
14122 STRIP_NOPS (arg1);
14123 if (TREE_CODE (arg1) != INTEGER_CST
14124 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14126 error ("argument 2 must be a 7-bit unsigned literal");
14127 return CONST0_RTX (tmode);
14130 else if (icode == CODE_FOR_unpackv1ti
14131 || icode == CODE_FOR_unpackkf
14132 || icode == CODE_FOR_unpacktf
14133 || icode == CODE_FOR_unpackif
14134 || icode == CODE_FOR_unpacktd)
14136 /* Only allow 1-bit unsigned literals. */
14137 STRIP_NOPS (arg1);
14138 if (TREE_CODE (arg1) != INTEGER_CST
14139 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14141 error ("argument 2 must be a 1-bit unsigned literal");
14142 return CONST0_RTX (tmode);
14146 if (target == 0
14147 || GET_MODE (target) != tmode
14148 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14149 target = gen_reg_rtx (tmode);
14151 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14152 op0 = copy_to_mode_reg (mode0, op0);
14153 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14154 op1 = copy_to_mode_reg (mode1, op1);
14156 pat = GEN_FCN (icode) (target, op0, op1);
14157 if (! pat)
14158 return 0;
14159 emit_insn (pat);
14161 return target;
14164 static rtx
14165 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14167 rtx pat, scratch;
14168 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14169 tree arg0 = CALL_EXPR_ARG (exp, 1);
14170 tree arg1 = CALL_EXPR_ARG (exp, 2);
14171 rtx op0 = expand_normal (arg0);
14172 rtx op1 = expand_normal (arg1);
14173 machine_mode tmode = SImode;
14174 machine_mode mode0 = insn_data[icode].operand[1].mode;
14175 machine_mode mode1 = insn_data[icode].operand[2].mode;
14176 int cr6_form_int;
14178 if (TREE_CODE (cr6_form) != INTEGER_CST)
14180 error ("argument 1 of %qs must be a constant",
14181 "__builtin_altivec_predicate");
14182 return const0_rtx;
14184 else
14185 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14187 gcc_assert (mode0 == mode1);
14189 /* If we have invalid arguments, bail out before generating bad rtl. */
14190 if (arg0 == error_mark_node || arg1 == error_mark_node)
14191 return const0_rtx;
14193 if (target == 0
14194 || GET_MODE (target) != tmode
14195 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14196 target = gen_reg_rtx (tmode);
14198 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14199 op0 = copy_to_mode_reg (mode0, op0);
14200 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14201 op1 = copy_to_mode_reg (mode1, op1);
14203 /* Note that for many of the relevant operations (e.g. cmpne or
14204 cmpeq) with float or double operands, it makes more sense for the
14205 mode of the allocated scratch register to be a vector integer
14206 mode. But the choice to copy the mode of operand 0 was made
14207 long ago and there are no plans to change it. */
14208 scratch = gen_reg_rtx (mode0);
14210 pat = GEN_FCN (icode) (scratch, op0, op1);
14211 if (! pat)
14212 return 0;
14213 emit_insn (pat);
14215 /* The vec_any* and vec_all* predicates use the same opcodes for two
14216 different operations, but the bits in CR6 will be different
14217 depending on what information we want. So we have to play tricks
14218 with CR6 to get the right bits out.
14220 If you think this is disgusting, look at the specs for the
14221 AltiVec predicates. */
14223 switch (cr6_form_int)
14225 case 0:
14226 emit_insn (gen_cr6_test_for_zero (target));
14227 break;
14228 case 1:
14229 emit_insn (gen_cr6_test_for_zero_reverse (target));
14230 break;
14231 case 2:
14232 emit_insn (gen_cr6_test_for_lt (target));
14233 break;
14234 case 3:
14235 emit_insn (gen_cr6_test_for_lt_reverse (target));
14236 break;
14237 default:
14238 error ("argument 1 of %qs is out of range",
14239 "__builtin_altivec_predicate");
14240 break;
14243 return target;
14246 static rtx
14247 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14249 rtx pat, addr;
14250 tree arg0 = CALL_EXPR_ARG (exp, 0);
14251 tree arg1 = CALL_EXPR_ARG (exp, 1);
14252 machine_mode tmode = insn_data[icode].operand[0].mode;
14253 machine_mode mode0 = Pmode;
14254 machine_mode mode1 = Pmode;
14255 rtx op0 = expand_normal (arg0);
14256 rtx op1 = expand_normal (arg1);
14258 if (icode == CODE_FOR_nothing)
14259 /* Builtin not supported on this processor. */
14260 return 0;
14262 /* If we got invalid arguments bail out before generating bad rtl. */
14263 if (arg0 == error_mark_node || arg1 == error_mark_node)
14264 return const0_rtx;
14266 if (target == 0
14267 || GET_MODE (target) != tmode
14268 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14269 target = gen_reg_rtx (tmode);
14271 op1 = copy_to_mode_reg (mode1, op1);
14273 if (op0 == const0_rtx)
14275 addr = gen_rtx_MEM (tmode, op1);
14277 else
14279 op0 = copy_to_mode_reg (mode0, op0);
14280 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14283 pat = GEN_FCN (icode) (target, addr);
14285 if (! pat)
14286 return 0;
14287 emit_insn (pat);
14289 return target;
14292 /* Return a constant vector for use as a little-endian permute control vector
14293 to reverse the order of elements of the given vector mode. */
14294 static rtx
14295 swap_selector_for_mode (machine_mode mode)
14297 /* These are little endian vectors, so their elements are reversed
14298 from what you would normally expect for a permute control vector. */
14299 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14300 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14301 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14302 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14303 unsigned int *swaparray, i;
14304 rtx perm[16];
14306 switch (mode)
14308 case E_V2DFmode:
14309 case E_V2DImode:
14310 swaparray = swap2;
14311 break;
14312 case E_V4SFmode:
14313 case E_V4SImode:
14314 swaparray = swap4;
14315 break;
14316 case E_V8HImode:
14317 swaparray = swap8;
14318 break;
14319 case E_V16QImode:
14320 swaparray = swap16;
14321 break;
14322 default:
14323 gcc_unreachable ();
14326 for (i = 0; i < 16; ++i)
14327 perm[i] = GEN_INT (swaparray[i]);
14329 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
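/* A worked example, not from the GCC sources: for V4SImode the
   selector built above is {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.
   Because vperm indexes bytes in big-endian order while the register
   holds a little-endian image, feeding this selector to vperm
   reverses the order of the four 32-bit elements, which is what the
   -maltivec=be load and store expanders below rely on. */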
14332 rtx
14333 swap_endian_selector_for_mode (machine_mode mode)
14335 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
14336 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14337 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14338 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14340 unsigned int *swaparray, i;
14341 rtx perm[16];
14343 switch (mode)
14345 case E_V1TImode:
14346 swaparray = swap1;
14347 break;
14348 case E_V2DFmode:
14349 case E_V2DImode:
14350 swaparray = swap2;
14351 break;
14352 case E_V4SFmode:
14353 case E_V4SImode:
14354 swaparray = swap4;
14355 break;
14356 case E_V8HImode:
14357 swaparray = swap8;
14358 break;
14359 default:
14360 gcc_unreachable ();
14363 for (i = 0; i < 16; ++i)
14364 perm[i] = GEN_INT (swaparray[i]);
14366 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
14367 gen_rtvec_v (16, perm)));
14370 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14371 with -maltivec=be specified. Issue the load followed by an element-
14372 reversing permute. */
14373 void
14374 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14376 rtx tmp = gen_reg_rtx (mode);
14377 rtx load = gen_rtx_SET (tmp, op1);
14378 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14379 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14380 rtx sel = swap_selector_for_mode (mode);
14381 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14383 gcc_assert (REG_P (op0));
14384 emit_insn (par);
14385 emit_insn (gen_rtx_SET (op0, vperm));
14388 /* Generate code for a "stvxl" built-in for a little endian target with
14389 -maltivec=be specified. Issue the store preceded by an element-reversing
14390 permute. */
14391 void
14392 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14394 rtx tmp = gen_reg_rtx (mode);
14395 rtx store = gen_rtx_SET (op0, tmp);
14396 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14397 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14398 rtx sel = swap_selector_for_mode (mode);
14399 rtx vperm;
14401 gcc_assert (REG_P (op1));
14402 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14403 emit_insn (gen_rtx_SET (tmp, vperm));
14404 emit_insn (par);
14407 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14408 specified. Issue the store preceded by an element-reversing permute. */
14409 void
14410 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14412 machine_mode inner_mode = GET_MODE_INNER (mode);
14413 rtx tmp = gen_reg_rtx (mode);
14414 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14415 rtx sel = swap_selector_for_mode (mode);
14416 rtx vperm;
14418 gcc_assert (REG_P (op1));
14419 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14420 emit_insn (gen_rtx_SET (tmp, vperm));
14421 emit_insn (gen_rtx_SET (op0, stvx));
14424 static rtx
14425 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14427 rtx pat, addr;
14428 tree arg0 = CALL_EXPR_ARG (exp, 0);
14429 tree arg1 = CALL_EXPR_ARG (exp, 1);
14430 machine_mode tmode = insn_data[icode].operand[0].mode;
14431 machine_mode mode0 = Pmode;
14432 machine_mode mode1 = Pmode;
14433 rtx op0 = expand_normal (arg0);
14434 rtx op1 = expand_normal (arg1);
14436 if (icode == CODE_FOR_nothing)
14437 /* Builtin not supported on this processor. */
14438 return 0;
14440 /* If we got invalid arguments bail out before generating bad rtl. */
14441 if (arg0 == error_mark_node || arg1 == error_mark_node)
14442 return const0_rtx;
14444 if (target == 0
14445 || GET_MODE (target) != tmode
14446 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14447 target = gen_reg_rtx (tmode);
14449 op1 = copy_to_mode_reg (mode1, op1);
14451 /* For LVX, express the RTL accurately by ANDing the address with -16.
14452 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14453 so the raw address is fine. */
14454 if (icode == CODE_FOR_altivec_lvx_v1ti
14455 || icode == CODE_FOR_altivec_lvx_v2df
14456 || icode == CODE_FOR_altivec_lvx_v2di
14457 || icode == CODE_FOR_altivec_lvx_v4sf
14458 || icode == CODE_FOR_altivec_lvx_v4si
14459 || icode == CODE_FOR_altivec_lvx_v8hi
14460 || icode == CODE_FOR_altivec_lvx_v16qi)
14462 rtx rawaddr;
14463 if (op0 == const0_rtx)
14464 rawaddr = op1;
14465 else
14467 op0 = copy_to_mode_reg (mode0, op0);
14468 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14470 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14471 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14473 /* For -maltivec=be, emit the load and follow it up with a
14474 permute to swap the elements. */
14475 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14477 rtx temp = gen_reg_rtx (tmode);
14478 emit_insn (gen_rtx_SET (temp, addr));
14480 rtx sel = swap_selector_for_mode (tmode);
14481 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14482 UNSPEC_VPERM);
14483 emit_insn (gen_rtx_SET (target, vperm));
14485 else
14486 emit_insn (gen_rtx_SET (target, addr));
14488 else
14490 if (op0 == const0_rtx)
14491 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14492 else
14494 op0 = copy_to_mode_reg (mode0, op0);
14495 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14496 gen_rtx_PLUS (Pmode, op1, op0));
14499 pat = GEN_FCN (icode) (target, addr);
14500 if (! pat)
14501 return 0;
14502 emit_insn (pat);
14505 return target;
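/* Editorial sketch, not part of the original file: the AND with -16
   above models the hardware behavior of lvx, which ignores the low four
   bits of the effective address.  Assuming <altivec.h>:

       #include <altivec.h>

       vector unsigned char
       load_block (unsigned char *p)
       {
         return vec_ld (3, p);
       }

   This is not an unaligned load; it loads the 16-byte block containing
   p + 3, which is exactly what the (and (plus op1 op0) -16) address RTL
   expresses.  */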
14508 static rtx
14509 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14511 tree arg0 = CALL_EXPR_ARG (exp, 0);
14512 tree arg1 = CALL_EXPR_ARG (exp, 1);
14513 tree arg2 = CALL_EXPR_ARG (exp, 2);
14514 rtx op0 = expand_normal (arg0);
14515 rtx op1 = expand_normal (arg1);
14516 rtx op2 = expand_normal (arg2);
14517 rtx pat, addr;
14518 machine_mode tmode = insn_data[icode].operand[0].mode;
14519 machine_mode mode1 = Pmode;
14520 machine_mode mode2 = Pmode;
14522 /* Invalid arguments; bail out before generating bad rtl. */
14523 if (arg0 == error_mark_node
14524 || arg1 == error_mark_node
14525 || arg2 == error_mark_node)
14526 return const0_rtx;
14528 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14529 op0 = copy_to_mode_reg (tmode, op0);
14531 op2 = copy_to_mode_reg (mode2, op2);
14533 if (op1 == const0_rtx)
14535 addr = gen_rtx_MEM (tmode, op2);
14537 else
14539 op1 = copy_to_mode_reg (mode1, op1);
14540 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14543 pat = GEN_FCN (icode) (addr, op0);
14544 if (pat)
14545 emit_insn (pat);
14546 return NULL_RTX;
14549 static rtx
14550 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14552 rtx pat;
14553 tree arg0 = CALL_EXPR_ARG (exp, 0);
14554 tree arg1 = CALL_EXPR_ARG (exp, 1);
14555 tree arg2 = CALL_EXPR_ARG (exp, 2);
14556 rtx op0 = expand_normal (arg0);
14557 rtx op1 = expand_normal (arg1);
14558 rtx op2 = expand_normal (arg2);
14559 machine_mode mode0 = insn_data[icode].operand[0].mode;
14560 machine_mode mode1 = insn_data[icode].operand[1].mode;
14561 machine_mode mode2 = insn_data[icode].operand[2].mode;
14563 if (icode == CODE_FOR_nothing)
14564 /* Builtin not supported on this processor. */
14565 return NULL_RTX;
14567 /* If we got invalid arguments bail out before generating bad rtl. */
14568 if (arg0 == error_mark_node
14569 || arg1 == error_mark_node
14570 || arg2 == error_mark_node)
14571 return NULL_RTX;
14573 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14574 op0 = copy_to_mode_reg (mode0, op0);
14575 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14576 op1 = copy_to_mode_reg (mode1, op1);
14577 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14578 op2 = copy_to_mode_reg (mode2, op2);
14580 pat = GEN_FCN (icode) (op0, op1, op2);
14581 if (pat)
14582 emit_insn (pat);
14584 return NULL_RTX;
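/* Editorial sketch, not part of the original file: the three operands
   prepared above correspond to the ISA 3.0 variable-length store
   intrinsic, e.g. (assuming <altivec.h>, <stddef.h> and -mcpu=power9):

       #include <stddef.h>
       #include <altivec.h>

       void
       store_n (vector unsigned char v, unsigned char *p, size_t n)
       {
         vec_xst_len (v, p, n);
       }

   vec_xst_len stores only the first N bytes of V and maps onto the
   stxvl pattern expanded here.  */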
14587 static rtx
14588 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14590 tree arg0 = CALL_EXPR_ARG (exp, 0);
14591 tree arg1 = CALL_EXPR_ARG (exp, 1);
14592 tree arg2 = CALL_EXPR_ARG (exp, 2);
14593 rtx op0 = expand_normal (arg0);
14594 rtx op1 = expand_normal (arg1);
14595 rtx op2 = expand_normal (arg2);
14596 rtx pat, addr, rawaddr;
14597 machine_mode tmode = insn_data[icode].operand[0].mode;
14598 machine_mode smode = insn_data[icode].operand[1].mode;
14599 machine_mode mode1 = Pmode;
14600 machine_mode mode2 = Pmode;
14602 /* Invalid arguments; bail out before generating bad rtl. */
14603 if (arg0 == error_mark_node
14604 || arg1 == error_mark_node
14605 || arg2 == error_mark_node)
14606 return const0_rtx;
14608 op2 = copy_to_mode_reg (mode2, op2);
14610 /* For STVX, express the RTL accurately by ANDing the address with -16.
14611 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14612 so the raw address is fine. */
14613 if (icode == CODE_FOR_altivec_stvx_v2df
14614 || icode == CODE_FOR_altivec_stvx_v2di
14615 || icode == CODE_FOR_altivec_stvx_v4sf
14616 || icode == CODE_FOR_altivec_stvx_v4si
14617 || icode == CODE_FOR_altivec_stvx_v8hi
14618 || icode == CODE_FOR_altivec_stvx_v16qi)
14620 if (op1 == const0_rtx)
14621 rawaddr = op2;
14622 else
14624 op1 = copy_to_mode_reg (mode1, op1);
14625 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14628 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14629 addr = gen_rtx_MEM (tmode, addr);
14631 op0 = copy_to_mode_reg (tmode, op0);
14633 /* For -maltivec=be, emit a permute to swap the elements, followed
14634 by the store. */
14635 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14637 rtx temp = gen_reg_rtx (tmode);
14638 rtx sel = swap_selector_for_mode (tmode);
14639 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14640 UNSPEC_VPERM);
14641 emit_insn (gen_rtx_SET (temp, vperm));
14642 emit_insn (gen_rtx_SET (addr, temp));
14644 else
14645 emit_insn (gen_rtx_SET (addr, op0));
14647 else
14649 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14650 op0 = copy_to_mode_reg (smode, op0);
14652 if (op1 == const0_rtx)
14653 addr = gen_rtx_MEM (tmode, op2);
14654 else
14656 op1 = copy_to_mode_reg (mode1, op1);
14657 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14660 pat = GEN_FCN (icode) (addr, op0);
14661 if (pat)
14662 emit_insn (pat);
14665 return NULL_RTX;
14668 /* Return the appropriate SPR number associated with the given builtin. */
14669 static inline HOST_WIDE_INT
14670 htm_spr_num (enum rs6000_builtins code)
14672 if (code == HTM_BUILTIN_GET_TFHAR
14673 || code == HTM_BUILTIN_SET_TFHAR)
14674 return TFHAR_SPR;
14675 else if (code == HTM_BUILTIN_GET_TFIAR
14676 || code == HTM_BUILTIN_SET_TFIAR)
14677 return TFIAR_SPR;
14678 else if (code == HTM_BUILTIN_GET_TEXASR
14679 || code == HTM_BUILTIN_SET_TEXASR)
14680 return TEXASR_SPR;
14681 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14682 || code == HTM_BUILTIN_SET_TEXASRU);
14683 return TEXASRU_SPR;
14686 /* Return the appropriate SPR regno associated with the given builtin. */
14687 static inline HOST_WIDE_INT
14688 htm_spr_regno (enum rs6000_builtins code)
14690 if (code == HTM_BUILTIN_GET_TFHAR
14691 || code == HTM_BUILTIN_SET_TFHAR)
14692 return TFHAR_REGNO;
14693 else if (code == HTM_BUILTIN_GET_TFIAR
14694 || code == HTM_BUILTIN_SET_TFIAR)
14695 return TFIAR_REGNO;
14696 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14697 || code == HTM_BUILTIN_SET_TEXASR
14698 || code == HTM_BUILTIN_GET_TEXASRU
14699 || code == HTM_BUILTIN_SET_TEXASRU);
14700 return TEXASR_REGNO;
14703 /* Return the correct ICODE value depending on whether we are
14704 setting or reading the HTM SPRs. */
14705 static inline enum insn_code
14706 rs6000_htm_spr_icode (bool nonvoid)
14708 if (nonvoid)
14709 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14710 else
14711 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14714 /* Expand the HTM builtin in EXP and store the result in TARGET.
14715 Store true in *EXPANDEDP if we found a builtin to expand. */
14716 static rtx
14717 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14719 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14720 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14721 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14722 const struct builtin_description *d;
14723 size_t i;
14725 *expandedp = true;
14727 if (!TARGET_POWERPC64
14728 && (fcode == HTM_BUILTIN_TABORTDC
14729 || fcode == HTM_BUILTIN_TABORTDCI))
14731 size_t uns_fcode = (size_t)fcode;
14732 const char *name = rs6000_builtin_info[uns_fcode].name;
14733 error ("builtin %qs is only valid in 64-bit mode", name);
14734 return const0_rtx;
14737 /* Expand the HTM builtins. */
14738 d = bdesc_htm;
14739 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14740 if (d->code == fcode)
14742 rtx op[MAX_HTM_OPERANDS], pat;
14743 int nopnds = 0;
14744 tree arg;
14745 call_expr_arg_iterator iter;
14746 unsigned attr = rs6000_builtin_info[fcode].attr;
14747 enum insn_code icode = d->icode;
14748 const struct insn_operand_data *insn_op;
14749 bool uses_spr = (attr & RS6000_BTC_SPR);
14750 rtx cr = NULL_RTX;
14752 if (uses_spr)
14753 icode = rs6000_htm_spr_icode (nonvoid);
14754 insn_op = &insn_data[icode].operand[0];
14756 if (nonvoid)
14758 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14759 if (!target
14760 || GET_MODE (target) != tmode
14761 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14762 target = gen_reg_rtx (tmode);
14763 if (uses_spr)
14764 op[nopnds++] = target;
14767 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14769 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14770 return const0_rtx;
14772 insn_op = &insn_data[icode].operand[nopnds];
14774 op[nopnds] = expand_normal (arg);
14776 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14778 if (!strcmp (insn_op->constraint, "n"))
14780 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14781 if (!CONST_INT_P (op[nopnds]))
14782 error ("argument %d must be an unsigned literal", arg_num);
14783 else
14784 error ("argument %d is an unsigned literal that is "
14785 "out of range", arg_num);
14786 return const0_rtx;
14788 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14791 nopnds++;
14794 /* Handle the builtins for extended mnemonics. These accept
14795 no arguments, but map to builtins that take arguments. */
14796 switch (fcode)
14798 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14799 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14800 op[nopnds++] = GEN_INT (1);
14801 if (flag_checking)
14802 attr |= RS6000_BTC_UNARY;
14803 break;
14804 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14805 op[nopnds++] = GEN_INT (0);
14806 if (flag_checking)
14807 attr |= RS6000_BTC_UNARY;
14808 break;
14809 default:
14810 break;
14813 /* If this builtin accesses SPRs, then pass in the appropriate
14814 SPR number and SPR regno as the last two operands. */
14815 if (uses_spr)
14817 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14818 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14819 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14821 /* If this builtin accesses a CR, then pass in a scratch
14822 CR as the last operand. */
14823 else if (attr & RS6000_BTC_CR)
14824 { cr = gen_reg_rtx (CCmode);
14825 op[nopnds++] = cr;
14828 if (flag_checking)
14830 int expected_nopnds = 0;
14831 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14832 expected_nopnds = 1;
14833 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14834 expected_nopnds = 2;
14835 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14836 expected_nopnds = 3;
14837 if (!(attr & RS6000_BTC_VOID))
14838 expected_nopnds += 1;
14839 if (uses_spr)
14840 expected_nopnds += 2;
14842 gcc_assert (nopnds == expected_nopnds
14843 && nopnds <= MAX_HTM_OPERANDS);
14846 switch (nopnds)
14848 case 1:
14849 pat = GEN_FCN (icode) (op[0]);
14850 break;
14851 case 2:
14852 pat = GEN_FCN (icode) (op[0], op[1]);
14853 break;
14854 case 3:
14855 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14856 break;
14857 case 4:
14858 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14859 break;
14860 default:
14861 gcc_unreachable ();
14863 if (!pat)
14864 return NULL_RTX;
14865 emit_insn (pat);
14867 if (attr & RS6000_BTC_CR)
14869 if (fcode == HTM_BUILTIN_TBEGIN)
14871 /* Emit code to set TARGET to true or false depending on
14872 whether the tbegin. instruction succeeded or failed
14873 to start a transaction. We do this by placing the 1's
14874 complement of CR's EQ bit into TARGET. */
14875 rtx scratch = gen_reg_rtx (SImode);
14876 emit_insn (gen_rtx_SET (scratch,
14877 gen_rtx_EQ (SImode, cr,
14878 const0_rtx)));
14879 emit_insn (gen_rtx_SET (target,
14880 gen_rtx_XOR (SImode, scratch,
14881 GEN_INT (1))));
14883 else
14885 /* Emit code to copy the 4-bit condition register field
14886 CR into the least significant end of register TARGET. */
14887 rtx scratch1 = gen_reg_rtx (SImode);
14888 rtx scratch2 = gen_reg_rtx (SImode);
14889 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14890 emit_insn (gen_movcc (subreg, cr));
14891 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14892 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14896 if (nonvoid)
14897 return target;
14898 return const0_rtx;
14901 *expandedp = false;
14902 return NULL_RTX;
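/* Editorial sketch, not part of the original file: the CR handling above
   is what makes the usual transactional-memory idiom work (assuming
   -mhtm):

       long counter;

       void
       bump (void)
       {
         if (__builtin_tbegin (0))
           {
             counter++;
             __builtin_tend (0);
           }
       }

   Per the code above, __builtin_tbegin returns the one's complement of
   CR0's EQ bit, so the result is nonzero exactly when a transaction was
   successfully started.  */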
14905 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14907 static rtx
14908 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14909 rtx target)
14911 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14912 if (fcode == RS6000_BUILTIN_CPU_INIT)
14913 return const0_rtx;
14915 if (target == 0 || GET_MODE (target) != SImode)
14916 target = gen_reg_rtx (SImode);
14918 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14919 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14920 /* The target_clones attribute creates an ARRAY_REF instead of a
14921 STRING_CST; convert it back to a STRING_CST. */
14922 if (TREE_CODE (arg) == ARRAY_REF
14923 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14924 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14925 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14926 arg = TREE_OPERAND (arg, 0);
14928 if (TREE_CODE (arg) != STRING_CST)
14930 error ("builtin %qs only accepts a string argument",
14931 rs6000_builtin_info[(size_t) fcode].name);
14932 return const0_rtx;
14935 if (fcode == RS6000_BUILTIN_CPU_IS)
14937 const char *cpu = TREE_STRING_POINTER (arg);
14938 rtx cpuid = NULL_RTX;
14939 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14940 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14942 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14943 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14944 break;
14946 if (cpuid == NULL_RTX)
14948 /* Invalid CPU argument. */
14949 error ("cpu %qs is an invalid argument to builtin %qs",
14950 cpu, rs6000_builtin_info[(size_t) fcode].name);
14951 return const0_rtx;
14954 rtx platform = gen_reg_rtx (SImode);
14955 rtx tcbmem = gen_const_mem (SImode,
14956 gen_rtx_PLUS (Pmode,
14957 gen_rtx_REG (Pmode, TLS_REGNUM),
14958 GEN_INT (TCB_PLATFORM_OFFSET)));
14959 emit_move_insn (platform, tcbmem);
14960 emit_insn (gen_eqsi3 (target, platform, cpuid));
14962 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14964 const char *hwcap = TREE_STRING_POINTER (arg);
14965 rtx mask = NULL_RTX;
14966 int hwcap_offset;
14967 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14968 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14970 mask = GEN_INT (cpu_supports_info[i].mask);
14971 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14972 break;
14974 if (mask == NULL_RTX)
14976 /* Invalid HWCAP argument. */
14977 error ("%s %qs is an invalid argument to builtin %qs",
14978 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14979 return const0_rtx;
14982 rtx tcb_hwcap = gen_reg_rtx (SImode);
14983 rtx tcbmem = gen_const_mem (SImode,
14984 gen_rtx_PLUS (Pmode,
14985 gen_rtx_REG (Pmode, TLS_REGNUM),
14986 GEN_INT (hwcap_offset)));
14987 emit_move_insn (tcb_hwcap, tcbmem);
14988 rtx scratch1 = gen_reg_rtx (SImode);
14989 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14990 rtx scratch2 = gen_reg_rtx (SImode);
14991 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14992 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14994 else
14995 gcc_unreachable ();
14997 /* Record that we have expanded a CPU builtin, so that we can later
14998 emit a reference to the special symbol exported by LIBC to ensure we
14999 do not link against an old LIBC that doesn't support this feature. */
15000 cpu_builtin_p = true;
15002 #else
15003 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
15004 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
15006 /* For old LIBCs, always return FALSE. */
15007 emit_move_insn (target, GEN_INT (0));
15008 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15010 return target;
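/* Editorial sketch, not part of the original file: the TCB loads emitted
   above back the user-level facility (glibc 2.23 or newer), e.g.:

       int
       pick_path (void)
       {
         __builtin_cpu_init ();
         if (__builtin_cpu_is ("power9"))
           return 9;
         if (__builtin_cpu_supports ("vsx"))
           return 1;
         return 0;
       }

   Each test compiles to a load from the thread control block followed by
   a compare (cpu_is) or a mask-and-test (cpu_supports), as built above;
   __builtin_cpu_init itself expands to nothing on this target.  */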
15013 static rtx
15014 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15016 rtx pat;
15017 tree arg0 = CALL_EXPR_ARG (exp, 0);
15018 tree arg1 = CALL_EXPR_ARG (exp, 1);
15019 tree arg2 = CALL_EXPR_ARG (exp, 2);
15020 rtx op0 = expand_normal (arg0);
15021 rtx op1 = expand_normal (arg1);
15022 rtx op2 = expand_normal (arg2);
15023 machine_mode tmode = insn_data[icode].operand[0].mode;
15024 machine_mode mode0 = insn_data[icode].operand[1].mode;
15025 machine_mode mode1 = insn_data[icode].operand[2].mode;
15026 machine_mode mode2 = insn_data[icode].operand[3].mode;
15028 if (icode == CODE_FOR_nothing)
15029 /* Builtin not supported on this processor. */
15030 return 0;
15032 /* If we got invalid arguments bail out before generating bad rtl. */
15033 if (arg0 == error_mark_node
15034 || arg1 == error_mark_node
15035 || arg2 == error_mark_node)
15036 return const0_rtx;
15038 /* Check and prepare argument depending on the instruction code.
15040 Note that a switch statement instead of the sequence of tests
15041 would be incorrect as many of the CODE_FOR values could be
15042 CODE_FOR_nothing and that would yield multiple alternatives
15043 with identical values. We'd never reach here at runtime in
15044 this case. */
15045 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15046 || icode == CODE_FOR_altivec_vsldoi_v2df
15047 || icode == CODE_FOR_altivec_vsldoi_v4si
15048 || icode == CODE_FOR_altivec_vsldoi_v8hi
15049 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15051 /* Only allow 4-bit unsigned literals. */
15052 STRIP_NOPS (arg2);
15053 if (TREE_CODE (arg2) != INTEGER_CST
15054 || TREE_INT_CST_LOW (arg2) & ~0xf)
15056 error ("argument 3 must be a 4-bit unsigned literal");
15057 return CONST0_RTX (tmode);
15060 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15061 || icode == CODE_FOR_vsx_xxpermdi_v2di
15062 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15063 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15064 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15065 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15066 || icode == CODE_FOR_vsx_xxpermdi_v4si
15067 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15068 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15069 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15070 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15071 || icode == CODE_FOR_vsx_xxsldwi_v4si
15072 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15073 || icode == CODE_FOR_vsx_xxsldwi_v2di
15074 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15076 /* Only allow 2-bit unsigned literals. */
15077 STRIP_NOPS (arg2);
15078 if (TREE_CODE (arg2) != INTEGER_CST
15079 || TREE_INT_CST_LOW (arg2) & ~0x3)
15081 error ("argument 3 must be a 2-bit unsigned literal");
15082 return CONST0_RTX (tmode);
15085 else if (icode == CODE_FOR_vsx_set_v2df
15086 || icode == CODE_FOR_vsx_set_v2di
15087 || icode == CODE_FOR_bcdadd
15088 || icode == CODE_FOR_bcdadd_lt
15089 || icode == CODE_FOR_bcdadd_eq
15090 || icode == CODE_FOR_bcdadd_gt
15091 || icode == CODE_FOR_bcdsub
15092 || icode == CODE_FOR_bcdsub_lt
15093 || icode == CODE_FOR_bcdsub_eq
15094 || icode == CODE_FOR_bcdsub_gt)
15096 /* Only allow 1-bit unsigned literals. */
15097 STRIP_NOPS (arg2);
15098 if (TREE_CODE (arg2) != INTEGER_CST
15099 || TREE_INT_CST_LOW (arg2) & ~0x1)
15101 error ("argument 3 must be a 1-bit unsigned literal");
15102 return CONST0_RTX (tmode);
15105 else if (icode == CODE_FOR_dfp_ddedpd_dd
15106 || icode == CODE_FOR_dfp_ddedpd_td)
15108 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15109 STRIP_NOPS (arg0);
15110 if (TREE_CODE (arg0) != INTEGER_CST
15111 || TREE_INT_CST_LOW (arg0) & ~0x3)
15113 error ("argument 1 must be 0 or 2");
15114 return CONST0_RTX (tmode);
15117 else if (icode == CODE_FOR_dfp_denbcd_dd
15118 || icode == CODE_FOR_dfp_denbcd_td)
15120 /* Only allow 1-bit unsigned literals. */
15121 STRIP_NOPS (arg0);
15122 if (TREE_CODE (arg0) != INTEGER_CST
15123 || TREE_INT_CST_LOW (arg0) & ~0x1)
15125 error ("argument 1 must be a 1-bit unsigned literal");
15126 return CONST0_RTX (tmode);
15129 else if (icode == CODE_FOR_dfp_dscli_dd
15130 || icode == CODE_FOR_dfp_dscli_td
15131 || icode == CODE_FOR_dfp_dscri_dd
15132 || icode == CODE_FOR_dfp_dscri_td)
15134 /* Only allow 6-bit unsigned literals. */
15135 STRIP_NOPS (arg1);
15136 if (TREE_CODE (arg1) != INTEGER_CST
15137 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15139 error ("argument 2 must be a 6-bit unsigned literal");
15140 return CONST0_RTX (tmode);
15143 else if (icode == CODE_FOR_crypto_vshasigmaw
15144 || icode == CODE_FOR_crypto_vshasigmad)
15146 /* Check whether the 2nd and 3rd arguments are integer constants and in
15147 range and prepare arguments. */
15148 STRIP_NOPS (arg1);
15149 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
15151 error ("argument 2 must be 0 or 1");
15152 return CONST0_RTX (tmode);
15155 STRIP_NOPS (arg2);
15156 if (TREE_CODE (arg2) != INTEGER_CST
15157 || wi::geu_p (wi::to_wide (arg2), 16))
15159 error ("argument 3 must be in the range 0..15");
15160 return CONST0_RTX (tmode);
15164 if (target == 0
15165 || GET_MODE (target) != tmode
15166 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15167 target = gen_reg_rtx (tmode);
15169 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15170 op0 = copy_to_mode_reg (mode0, op0);
15171 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15172 op1 = copy_to_mode_reg (mode1, op1);
15173 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15174 op2 = copy_to_mode_reg (mode2, op2);
15176 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15177 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15178 else
15179 pat = GEN_FCN (icode) (target, op0, op1, op2);
15180 if (! pat)
15181 return 0;
15182 emit_insn (pat);
15184 return target;
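/* Editorial sketch, not part of the original file: the literal checks
   above reject non-constant or out-of-range operands at compile time.
   Assuming <altivec.h>:

       #include <altivec.h>

       vector int
       shift_ok (vector int a, vector int b)
       {
         return vec_sld (a, b, 4);
       }

   The constant 4 passes the 4-bit-literal test, whereas passing a
   variable or a value outside 0..15 as the third operand is diagnosed
   with "argument 3 must be a 4-bit unsigned literal" rather than
   silently generating bad rtl.  */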
15187 /* Expand the lvx builtins. */
15188 static rtx
15189 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15191 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15192 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15193 tree arg0;
15194 machine_mode tmode, mode0;
15195 rtx pat, op0;
15196 enum insn_code icode;
15198 switch (fcode)
15200 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15201 icode = CODE_FOR_vector_altivec_load_v16qi;
15202 break;
15203 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15204 icode = CODE_FOR_vector_altivec_load_v8hi;
15205 break;
15206 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15207 icode = CODE_FOR_vector_altivec_load_v4si;
15208 break;
15209 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15210 icode = CODE_FOR_vector_altivec_load_v4sf;
15211 break;
15212 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15213 icode = CODE_FOR_vector_altivec_load_v2df;
15214 break;
15215 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15216 icode = CODE_FOR_vector_altivec_load_v2di;
15217 break;
15218 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15219 icode = CODE_FOR_vector_altivec_load_v1ti;
15220 break;
15221 default:
15222 *expandedp = false;
15223 return NULL_RTX;
15226 *expandedp = true;
15228 arg0 = CALL_EXPR_ARG (exp, 0);
15229 op0 = expand_normal (arg0);
15230 tmode = insn_data[icode].operand[0].mode;
15231 mode0 = insn_data[icode].operand[1].mode;
15233 if (target == 0
15234 || GET_MODE (target) != tmode
15235 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15236 target = gen_reg_rtx (tmode);
15238 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15239 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15241 pat = GEN_FCN (icode) (target, op0);
15242 if (! pat)
15243 return 0;
15244 emit_insn (pat);
15245 return target;
15248 /* Expand the stvx builtins. */
15249 static rtx
15250 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15251 bool *expandedp)
15253 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15254 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15255 tree arg0, arg1;
15256 machine_mode mode0, mode1;
15257 rtx pat, op0, op1;
15258 enum insn_code icode;
15260 switch (fcode)
15262 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15263 icode = CODE_FOR_vector_altivec_store_v16qi;
15264 break;
15265 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15266 icode = CODE_FOR_vector_altivec_store_v8hi;
15267 break;
15268 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15269 icode = CODE_FOR_vector_altivec_store_v4si;
15270 break;
15271 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15272 icode = CODE_FOR_vector_altivec_store_v4sf;
15273 break;
15274 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15275 icode = CODE_FOR_vector_altivec_store_v2df;
15276 break;
15277 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15278 icode = CODE_FOR_vector_altivec_store_v2di;
15279 break;
15280 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15281 icode = CODE_FOR_vector_altivec_store_v1ti;
15282 break;
15283 default:
15284 *expandedp = false;
15285 return NULL_RTX;
15288 arg0 = CALL_EXPR_ARG (exp, 0);
15289 arg1 = CALL_EXPR_ARG (exp, 1);
15290 op0 = expand_normal (arg0);
15291 op1 = expand_normal (arg1);
15292 mode0 = insn_data[icode].operand[0].mode;
15293 mode1 = insn_data[icode].operand[1].mode;
15295 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15296 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15297 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15298 op1 = copy_to_mode_reg (mode1, op1);
15300 pat = GEN_FCN (icode) (op0, op1);
15301 if (pat)
15302 emit_insn (pat);
15304 *expandedp = true;
15305 return NULL_RTX;
15308 /* Expand the dst builtins. */
15309 static rtx
15310 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15311 bool *expandedp)
15313 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15314 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15315 tree arg0, arg1, arg2;
15316 machine_mode mode0, mode1;
15317 rtx pat, op0, op1, op2;
15318 const struct builtin_description *d;
15319 size_t i;
15321 *expandedp = false;
15323 /* Handle DST variants. */
15324 d = bdesc_dst;
15325 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15326 if (d->code == fcode)
15328 arg0 = CALL_EXPR_ARG (exp, 0);
15329 arg1 = CALL_EXPR_ARG (exp, 1);
15330 arg2 = CALL_EXPR_ARG (exp, 2);
15331 op0 = expand_normal (arg0);
15332 op1 = expand_normal (arg1);
15333 op2 = expand_normal (arg2);
15334 mode0 = insn_data[d->icode].operand[0].mode;
15335 mode1 = insn_data[d->icode].operand[1].mode;
15337 /* Invalid arguments, bail out before generating bad rtl. */
15338 if (arg0 == error_mark_node
15339 || arg1 == error_mark_node
15340 || arg2 == error_mark_node)
15341 return const0_rtx;
15343 *expandedp = true;
15344 STRIP_NOPS (arg2);
15345 if (TREE_CODE (arg2) != INTEGER_CST
15346 || TREE_INT_CST_LOW (arg2) & ~0x3)
15348 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15349 return const0_rtx;
15352 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15353 op0 = copy_to_mode_reg (Pmode, op0);
15354 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15355 op1 = copy_to_mode_reg (mode1, op1);
15357 pat = GEN_FCN (d->icode) (op0, op1, op2);
15358 if (pat != 0)
15359 emit_insn (pat);
15361 return NULL_RTX;
15364 return NULL_RTX;
15367 /* Expand vec_init builtin. */
15368 static rtx
15369 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15371 machine_mode tmode = TYPE_MODE (type);
15372 machine_mode inner_mode = GET_MODE_INNER (tmode);
15373 int i, n_elt = GET_MODE_NUNITS (tmode);
15375 gcc_assert (VECTOR_MODE_P (tmode));
15376 gcc_assert (n_elt == call_expr_nargs (exp));
15378 if (!target || !register_operand (target, tmode))
15379 target = gen_reg_rtx (tmode);
15381 /* If we have a vector comprised of a single element, such as V1TImode, do
15382 the initialization directly. */
15383 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15385 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15386 emit_move_insn (target, gen_lowpart (tmode, x));
15388 else
15390 rtvec v = rtvec_alloc (n_elt);
15392 for (i = 0; i < n_elt; ++i)
15394 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15395 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15398 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15401 return target;
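/* Editorial sketch, not part of the original file: this expander backs
   vector constructors with non-constant elements, e.g. (assuming
   -maltivec):

       vector int
       make4 (int a, int b, int c, int d)
       {
         return (vector int) { a, b, c, d };
       }

   With n_elt == 4 each element is taken as a lowpart of its expanded rtx
   and handed to rs6000_expand_vector_init via the PARALLEL; the
   single-element V1TI case instead moves the value directly through
   gen_lowpart.  */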
15404 /* Return the integer constant in ARG. Constrain it to be in the range
15405 of the subparts of VEC_TYPE; issue an error if not. */
15407 static int
15408 get_element_number (tree vec_type, tree arg)
15410 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15412 if (!tree_fits_uhwi_p (arg)
15413 || (elt = tree_to_uhwi (arg), elt > max))
15415 error ("selector must be an integer constant in the range 0..%wi", max);
15416 return 0;
15419 return elt;
15422 /* Expand vec_set builtin. */
15423 static rtx
15424 altivec_expand_vec_set_builtin (tree exp)
15426 machine_mode tmode, mode1;
15427 tree arg0, arg1, arg2;
15428 int elt;
15429 rtx op0, op1;
15431 arg0 = CALL_EXPR_ARG (exp, 0);
15432 arg1 = CALL_EXPR_ARG (exp, 1);
15433 arg2 = CALL_EXPR_ARG (exp, 2);
15435 tmode = TYPE_MODE (TREE_TYPE (arg0));
15436 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15437 gcc_assert (VECTOR_MODE_P (tmode));
15439 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15440 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15441 elt = get_element_number (TREE_TYPE (arg0), arg2);
15443 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15444 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15446 op0 = force_reg (tmode, op0);
15447 op1 = force_reg (mode1, op1);
15449 rs6000_expand_vector_set (op0, op1, elt);
15451 return op0;
15454 /* Expand vec_ext builtin. */
15455 static rtx
15456 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15458 machine_mode tmode, mode0;
15459 tree arg0, arg1;
15460 rtx op0;
15461 rtx op1;
15463 arg0 = CALL_EXPR_ARG (exp, 0);
15464 arg1 = CALL_EXPR_ARG (exp, 1);
15466 op0 = expand_normal (arg0);
15467 op1 = expand_normal (arg1);
15469 /* Call get_element_number to validate arg1 if it is a constant. */
15470 if (TREE_CODE (arg1) == INTEGER_CST)
15471 (void) get_element_number (TREE_TYPE (arg0), arg1);
15473 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15474 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15475 gcc_assert (VECTOR_MODE_P (mode0));
15477 op0 = force_reg (mode0, op0);
15479 if (optimize || !target || !register_operand (target, tmode))
15480 target = gen_reg_rtx (tmode);
15482 rs6000_expand_vector_extract (target, op0, op1);
15484 return target;
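/* Editorial sketch, not part of the original file: both constant and
   variable selectors reach rs6000_expand_vector_extract; only constant
   selectors are range-checked here.  Assuming <altivec.h>:

       int
       first (vector int v)
       {
         return vec_extract (v, 0);
       }

       int
       nth (vector int v, int n)
       {
         return vec_extract (v, n);
       }

   first () has its selector validated by get_element_number above, while
   nth () keeps a runtime selector and is expanded without a compile-time
   check.  */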
15487 /* Expand the builtin in EXP and store the result in TARGET. Store
15488 true in *EXPANDEDP if we found a builtin to expand. */
15489 static rtx
15490 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15492 const struct builtin_description *d;
15493 size_t i;
15494 enum insn_code icode;
15495 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15496 tree arg0, arg1, arg2;
15497 rtx op0, pat;
15498 machine_mode tmode, mode0;
15499 enum rs6000_builtins fcode
15500 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15502 if (rs6000_overloaded_builtin_p (fcode))
15504 *expandedp = true;
15505 error ("unresolved overload for Altivec builtin %qF", fndecl);
15507 /* Given it is invalid, just generate a normal call. */
15508 return expand_call (exp, target, false);
15511 target = altivec_expand_ld_builtin (exp, target, expandedp);
15512 if (*expandedp)
15513 return target;
15515 target = altivec_expand_st_builtin (exp, target, expandedp);
15516 if (*expandedp)
15517 return target;
15519 target = altivec_expand_dst_builtin (exp, target, expandedp);
15520 if (*expandedp)
15521 return target;
15523 *expandedp = true;
15525 switch (fcode)
15527 case ALTIVEC_BUILTIN_STVX_V2DF:
15528 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
15529 case ALTIVEC_BUILTIN_STVX_V2DI:
15530 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
15531 case ALTIVEC_BUILTIN_STVX_V4SF:
15532 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
15533 case ALTIVEC_BUILTIN_STVX:
15534 case ALTIVEC_BUILTIN_STVX_V4SI:
15535 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
15536 case ALTIVEC_BUILTIN_STVX_V8HI:
15537 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
15538 case ALTIVEC_BUILTIN_STVX_V16QI:
15539 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
15540 case ALTIVEC_BUILTIN_STVEBX:
15541 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15542 case ALTIVEC_BUILTIN_STVEHX:
15543 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15544 case ALTIVEC_BUILTIN_STVEWX:
15545 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15546 case ALTIVEC_BUILTIN_STVXL_V2DF:
15547 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15548 case ALTIVEC_BUILTIN_STVXL_V2DI:
15549 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15550 case ALTIVEC_BUILTIN_STVXL_V4SF:
15551 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15552 case ALTIVEC_BUILTIN_STVXL:
15553 case ALTIVEC_BUILTIN_STVXL_V4SI:
15554 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15555 case ALTIVEC_BUILTIN_STVXL_V8HI:
15556 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15557 case ALTIVEC_BUILTIN_STVXL_V16QI:
15558 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15560 case ALTIVEC_BUILTIN_STVLX:
15561 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15562 case ALTIVEC_BUILTIN_STVLXL:
15563 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15564 case ALTIVEC_BUILTIN_STVRX:
15565 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15566 case ALTIVEC_BUILTIN_STVRXL:
15567 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15569 case P9V_BUILTIN_STXVL:
15570 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15572 case P9V_BUILTIN_XST_LEN_R:
15573 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
15575 case VSX_BUILTIN_STXVD2X_V1TI:
15576 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15577 case VSX_BUILTIN_STXVD2X_V2DF:
15578 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15579 case VSX_BUILTIN_STXVD2X_V2DI:
15580 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15581 case VSX_BUILTIN_STXVW4X_V4SF:
15582 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15583 case VSX_BUILTIN_STXVW4X_V4SI:
15584 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15585 case VSX_BUILTIN_STXVW4X_V8HI:
15586 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15587 case VSX_BUILTIN_STXVW4X_V16QI:
15588 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15590 /* For the following, on big-endian targets it's OK to use any appropriate
15591 unaligned-supporting store, so use a generic expander. For
15592 little-endian, the exact element-reversing instruction must
15593 be used. */
15594 case VSX_BUILTIN_ST_ELEMREV_V1TI:
15596 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
15597 : CODE_FOR_vsx_st_elemrev_v1ti);
15598 return altivec_expand_stv_builtin (code, exp);
15600 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15602 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15603 : CODE_FOR_vsx_st_elemrev_v2df);
15604 return altivec_expand_stv_builtin (code, exp);
15606 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15608 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15609 : CODE_FOR_vsx_st_elemrev_v2di);
15610 return altivec_expand_stv_builtin (code, exp);
15612 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15614 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15615 : CODE_FOR_vsx_st_elemrev_v4sf);
15616 return altivec_expand_stv_builtin (code, exp);
15618 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15620 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15621 : CODE_FOR_vsx_st_elemrev_v4si);
15622 return altivec_expand_stv_builtin (code, exp);
15624 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15626 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15627 : CODE_FOR_vsx_st_elemrev_v8hi);
15628 return altivec_expand_stv_builtin (code, exp);
15630 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15632 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15633 : CODE_FOR_vsx_st_elemrev_v16qi);
15634 return altivec_expand_stv_builtin (code, exp);
15637 case ALTIVEC_BUILTIN_MFVSCR:
15638 icode = CODE_FOR_altivec_mfvscr;
15639 tmode = insn_data[icode].operand[0].mode;
15641 if (target == 0
15642 || GET_MODE (target) != tmode
15643 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15644 target = gen_reg_rtx (tmode);
15646 pat = GEN_FCN (icode) (target);
15647 if (! pat)
15648 return 0;
15649 emit_insn (pat);
15650 return target;
15652 case ALTIVEC_BUILTIN_MTVSCR:
15653 icode = CODE_FOR_altivec_mtvscr;
15654 arg0 = CALL_EXPR_ARG (exp, 0);
15655 op0 = expand_normal (arg0);
15656 mode0 = insn_data[icode].operand[0].mode;
15658 /* If we got invalid arguments bail out before generating bad rtl. */
15659 if (arg0 == error_mark_node)
15660 return const0_rtx;
15662 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15663 op0 = copy_to_mode_reg (mode0, op0);
15665 pat = GEN_FCN (icode) (op0);
15666 if (pat)
15667 emit_insn (pat);
15668 return NULL_RTX;
15670 case ALTIVEC_BUILTIN_DSSALL:
15671 emit_insn (gen_altivec_dssall ());
15672 return NULL_RTX;
15674 case ALTIVEC_BUILTIN_DSS:
15675 icode = CODE_FOR_altivec_dss;
15676 arg0 = CALL_EXPR_ARG (exp, 0);
15677 STRIP_NOPS (arg0);
15678 op0 = expand_normal (arg0);
15679 mode0 = insn_data[icode].operand[0].mode;
15681 /* If we got invalid arguments bail out before generating bad rtl. */
15682 if (arg0 == error_mark_node)
15683 return const0_rtx;
15685 if (TREE_CODE (arg0) != INTEGER_CST
15686 || TREE_INT_CST_LOW (arg0) & ~0x3)
15688 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15689 return const0_rtx;
15692 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15693 op0 = copy_to_mode_reg (mode0, op0);
15695 emit_insn (gen_altivec_dss (op0));
15696 return NULL_RTX;
15698 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15699 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15700 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15701 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15702 case VSX_BUILTIN_VEC_INIT_V2DF:
15703 case VSX_BUILTIN_VEC_INIT_V2DI:
15704 case VSX_BUILTIN_VEC_INIT_V1TI:
15705 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15707 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15708 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15709 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15710 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15711 case VSX_BUILTIN_VEC_SET_V2DF:
15712 case VSX_BUILTIN_VEC_SET_V2DI:
15713 case VSX_BUILTIN_VEC_SET_V1TI:
15714 return altivec_expand_vec_set_builtin (exp);
15716 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15717 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15718 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15719 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15720 case VSX_BUILTIN_VEC_EXT_V2DF:
15721 case VSX_BUILTIN_VEC_EXT_V2DI:
15722 case VSX_BUILTIN_VEC_EXT_V1TI:
15723 return altivec_expand_vec_ext_builtin (exp, target);
15725 case P9V_BUILTIN_VEC_EXTRACT4B:
15726 arg1 = CALL_EXPR_ARG (exp, 1);
15727 STRIP_NOPS (arg1);
15729 /* Generate a normal call if it is invalid. */
15730 if (arg1 == error_mark_node)
15731 return expand_call (exp, target, false);
15733 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15735 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15736 return expand_call (exp, target, false);
15738 break;
15740 case P9V_BUILTIN_VEC_INSERT4B:
15741 arg2 = CALL_EXPR_ARG (exp, 2);
15742 STRIP_NOPS (arg2);
15744 /* Generate a normal call if it is invalid. */
15745 if (arg2 == error_mark_node)
15746 return expand_call (exp, target, false);
15748 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15750 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15751 return expand_call (exp, target, false);
15753 break;
15755 default:
15756 break;
15760 /* Expand abs* operations. */
15761 d = bdesc_abs;
15762 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15763 if (d->code == fcode)
15764 return altivec_expand_abs_builtin (d->icode, exp, target);
15766 /* Expand the AltiVec predicates. */
15767 d = bdesc_altivec_preds;
15768 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15769 if (d->code == fcode)
15770 return altivec_expand_predicate_builtin (d->icode, exp, target);
15772 /* The LV* builtins are funky; they are initialized differently. */
15773 switch (fcode)
15775 case ALTIVEC_BUILTIN_LVSL:
15776 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15777 exp, target, false);
15778 case ALTIVEC_BUILTIN_LVSR:
15779 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15780 exp, target, false);
15781 case ALTIVEC_BUILTIN_LVEBX:
15782 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15783 exp, target, false);
15784 case ALTIVEC_BUILTIN_LVEHX:
15785 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15786 exp, target, false);
15787 case ALTIVEC_BUILTIN_LVEWX:
15788 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15789 exp, target, false);
15790 case ALTIVEC_BUILTIN_LVXL_V2DF:
15791 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15792 exp, target, false);
15793 case ALTIVEC_BUILTIN_LVXL_V2DI:
15794 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15795 exp, target, false);
15796 case ALTIVEC_BUILTIN_LVXL_V4SF:
15797 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15798 exp, target, false);
15799 case ALTIVEC_BUILTIN_LVXL:
15800 case ALTIVEC_BUILTIN_LVXL_V4SI:
15801 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15802 exp, target, false);
15803 case ALTIVEC_BUILTIN_LVXL_V8HI:
15804 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15805 exp, target, false);
15806 case ALTIVEC_BUILTIN_LVXL_V16QI:
15807 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15808 exp, target, false);
15809 case ALTIVEC_BUILTIN_LVX_V1TI:
15810 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15811 exp, target, false);
15812 case ALTIVEC_BUILTIN_LVX_V2DF:
15813 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15814 exp, target, false);
15815 case ALTIVEC_BUILTIN_LVX_V2DI:
15816 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15817 exp, target, false);
15818 case ALTIVEC_BUILTIN_LVX_V4SF:
15819 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15820 exp, target, false);
15821 case ALTIVEC_BUILTIN_LVX:
15822 case ALTIVEC_BUILTIN_LVX_V4SI:
15823 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15824 exp, target, false);
15825 case ALTIVEC_BUILTIN_LVX_V8HI:
15826 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15827 exp, target, false);
15828 case ALTIVEC_BUILTIN_LVX_V16QI:
15829 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15830 exp, target, false);
15831 case ALTIVEC_BUILTIN_LVLX:
15832 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15833 exp, target, true);
15834 case ALTIVEC_BUILTIN_LVLXL:
15835 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15836 exp, target, true);
15837 case ALTIVEC_BUILTIN_LVRX:
15838 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15839 exp, target, true);
15840 case ALTIVEC_BUILTIN_LVRXL:
15841 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15842 exp, target, true);
15843 case VSX_BUILTIN_LXVD2X_V1TI:
15844 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15845 exp, target, false);
15846 case VSX_BUILTIN_LXVD2X_V2DF:
15847 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15848 exp, target, false);
15849 case VSX_BUILTIN_LXVD2X_V2DI:
15850 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15851 exp, target, false);
15852 case VSX_BUILTIN_LXVW4X_V4SF:
15853 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15854 exp, target, false);
15855 case VSX_BUILTIN_LXVW4X_V4SI:
15856 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15857 exp, target, false);
15858 case VSX_BUILTIN_LXVW4X_V8HI:
15859 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15860 exp, target, false);
15861 case VSX_BUILTIN_LXVW4X_V16QI:
15862 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15863 exp, target, false);
15864 /* For the following, on big-endian targets it's OK to use any appropriate
15865 unaligned-supporting load, so use a generic expander. For
15866 little-endian, the exact element-reversing instruction must
15867 be used. */
15868 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15870 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15871 : CODE_FOR_vsx_ld_elemrev_v2df);
15872 return altivec_expand_lv_builtin (code, exp, target, false);
15874 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15876 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15877 : CODE_FOR_vsx_ld_elemrev_v1ti);
15878 return altivec_expand_lv_builtin (code, exp, target, false);
15880 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15882 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15883 : CODE_FOR_vsx_ld_elemrev_v2di);
15884 return altivec_expand_lv_builtin (code, exp, target, false);
15886 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15888 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15889 : CODE_FOR_vsx_ld_elemrev_v4sf);
15890 return altivec_expand_lv_builtin (code, exp, target, false);
15892 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15894 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15895 : CODE_FOR_vsx_ld_elemrev_v4si);
15896 return altivec_expand_lv_builtin (code, exp, target, false);
15898 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15900 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15901 : CODE_FOR_vsx_ld_elemrev_v8hi);
15902 return altivec_expand_lv_builtin (code, exp, target, false);
15904 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15906 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15907 : CODE_FOR_vsx_ld_elemrev_v16qi);
15908 return altivec_expand_lv_builtin (code, exp, target, false);
15910 break;
15911 default:
15912 break;
15916 *expandedp = false;
15917 return NULL_RTX;
15920 /* Expand the builtin in EXP and store the result in TARGET. Store
15921 true in *EXPANDEDP if we found a builtin to expand. */
15922 static rtx
15923 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15925 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15926 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15927 const struct builtin_description *d;
15928 size_t i;
15930 *expandedp = true;
15932 switch (fcode)
15934 case PAIRED_BUILTIN_STX:
15935 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15936 case PAIRED_BUILTIN_LX:
15937 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15938 default:
15939 break;
15943 /* Expand the paired predicates. */
15944 d = bdesc_paired_preds;
15945 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15946 if (d->code == fcode)
15947 return paired_expand_predicate_builtin (d->icode, exp, target);
15949 *expandedp = false;
15950 return NULL_RTX;
15953 static rtx
15954 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15956 rtx pat, scratch, tmp;
15957 tree form = CALL_EXPR_ARG (exp, 0);
15958 tree arg0 = CALL_EXPR_ARG (exp, 1);
15959 tree arg1 = CALL_EXPR_ARG (exp, 2);
15960 rtx op0 = expand_normal (arg0);
15961 rtx op1 = expand_normal (arg1);
15962 machine_mode mode0 = insn_data[icode].operand[1].mode;
15963 machine_mode mode1 = insn_data[icode].operand[2].mode;
15964 int form_int;
15965 enum rtx_code code;
15967 if (TREE_CODE (form) != INTEGER_CST)
15969 error ("argument 1 of %qs must be a constant",
15970 "__builtin_paired_predicate");
15971 return const0_rtx;
15973 else
15974 form_int = TREE_INT_CST_LOW (form);
15976 gcc_assert (mode0 == mode1);
15978 if (arg0 == error_mark_node || arg1 == error_mark_node)
15979 return const0_rtx;
15981 if (target == 0
15982 || GET_MODE (target) != SImode
15983 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15984 target = gen_reg_rtx (SImode);
15985 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15986 op0 = copy_to_mode_reg (mode0, op0);
15987 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15988 op1 = copy_to_mode_reg (mode1, op1);
15990 scratch = gen_reg_rtx (CCFPmode);
15992 pat = GEN_FCN (icode) (scratch, op0, op1);
15993 if (!pat)
15994 return const0_rtx;
15996 emit_insn (pat);
15998 switch (form_int)
16000 /* LT bit. */
16001 case 0:
16002 code = LT;
16003 break;
16004 /* GT bit. */
16005 case 1:
16006 code = GT;
16007 break;
16008 /* EQ bit. */
16009 case 2:
16010 code = EQ;
16011 break;
16012 /* UN bit. */
16013 case 3:
16014 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16015 return target;
16016 default:
16017 error ("argument 1 of %qs is out of range",
16018 "__builtin_paired_predicate");
16019 return const0_rtx;
16022 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16023 emit_move_insn (target, tmp);
16024 return target;
16027 /* Issue an error message for a builtin function that is called without the
16028 appropriate target options being set. */
16030 static void
16031 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16033 size_t uns_fncode = (size_t) fncode;
16034 const char *name = rs6000_builtin_info[uns_fncode].name;
16035 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16037 gcc_assert (name != NULL);
16038 if ((fnmask & RS6000_BTM_CELL) != 0)
16039 error ("builtin function %qs is only valid for the cell processor", name);
16040 else if ((fnmask & RS6000_BTM_VSX) != 0)
16041 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16042 else if ((fnmask & RS6000_BTM_HTM) != 0)
16043 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16044 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16045 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16046 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16047 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16048 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16049 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16050 error ("builtin function %qs requires the %qs and %qs options",
16051 name, "-mhard-dfp", "-mpower8-vector");
16052 else if ((fnmask & RS6000_BTM_DFP) != 0)
16053 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16054 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16055 error ("builtin function %qs requires the %qs option", name,
16056 "-mpower8-vector");
16057 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16058 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16059 error ("builtin function %qs requires the %qs and %qs options",
16060 name, "-mcpu=power9", "-m64");
16061 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16062 error ("builtin function %qs requires the %qs option", name,
16063 "-mcpu=power9");
16064 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16065 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16066 error ("builtin function %qs requires the %qs and %qs options",
16067 name, "-mcpu=power9", "-m64");
16068 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16069 error ("builtin function %qs requires the %qs option", name,
16070 "-mcpu=power9");
16071 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16072 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16073 error ("builtin function %qs requires the %qs and %qs options",
16074 name, "-mhard-float", "-mlong-double-128");
16075 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16076 error ("builtin function %qs requires the %qs option", name,
16077 "-mhard-float");
16078 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
16079 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
16080 name);
16081 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16082 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16083 else
16084 error ("builtin function %qs is not supported with the current options",
16085 name);
16088 /* Target hook for early folding of built-ins, shamelessly stolen
16089 from ia64.c. */
16091 static tree
16092 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
16093 int n_args ATTRIBUTE_UNUSED,
16094 tree *args ATTRIBUTE_UNUSED,
16095 bool ignore ATTRIBUTE_UNUSED)
16097 #ifdef SUBTARGET_FOLD_BUILTIN
16098 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16099 #else
16100 return NULL_TREE;
16101 #endif
16104 /* Helper function to sort out which built-ins may be valid without having
16105 an LHS. */
16106 static bool
16107 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
16109 switch (fn_code)
16111 case ALTIVEC_BUILTIN_STVX_V16QI:
16112 case ALTIVEC_BUILTIN_STVX_V8HI:
16113 case ALTIVEC_BUILTIN_STVX_V4SI:
16114 case ALTIVEC_BUILTIN_STVX_V4SF:
16115 case ALTIVEC_BUILTIN_STVX_V2DI:
16116 case ALTIVEC_BUILTIN_STVX_V2DF:
16117 return true;
16118 default:
16119 return false;
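/* Editorial sketch, not part of the original file: stores are the case
   where gimple folding must proceed without a result.  Assuming
   <altivec.h>:

       void
       spill (vector int v, vector int *p)
       {
         vec_st (v, 0, p);
       }

   The call has no LHS in gimple, yet the STVX flavors listed above may
   still be folded; any other builtin whose result is unused is left for
   rs6000_expand_builtin to handle.  */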
16123 /* Helper function to handle the gimple folding of a vector compare
16124 operation. This sets up true/false vectors, and uses the
16125 VEC_COND_EXPR operation.
16126 CODE indicates which comparison is to be made. (EQ, GT, ...).
16127 TYPE indicates the type of the result. */
16128 static tree
16129 fold_build_vec_cmp (tree_code code, tree type,
16130 tree arg0, tree arg1)
16132 tree cmp_type = build_same_sized_truth_vector_type (type);
16133 tree zero_vec = build_zero_cst (type);
16134 tree minus_one_vec = build_minus_one_cst (type);
16135 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
16136 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
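/* Editorial sketch, not part of the original file: the fold above gives
   vector compares their usual all-ones/all-zeros element semantics,
   i.e. for each element

       lhs[i] = (arg0[i] OP arg1[i]) ? -1 : 0

   so that, assuming <altivec.h>, a call such as

       vector bool int
       cmp (vector int a, vector int b)
       {
         return vec_cmpeq (a, b);
       }

   is replaced by a VEC_COND_EXPR over the boolean compare vector instead
   of surviving as an opaque builtin call.  */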
16139 /* Helper function to handle the in-between steps for the
16140 vector compare built-ins. */
16141 static void
16142 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
16144 tree arg0 = gimple_call_arg (stmt, 0);
16145 tree arg1 = gimple_call_arg (stmt, 1);
16146 tree lhs = gimple_call_lhs (stmt);
16147 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
16148 gimple *g = gimple_build_assign (lhs, cmp);
16149 gimple_set_location (g, gimple_location (stmt));
16150 gsi_replace (gsi, g, true);
16153 /* Helper function to handle the vector merge[hl] built-ins. The
16154 implementation difference between the h and l versions lies in the
16155 values used when building the permute vector for the high-word versus
16156 low-word merge; the variance is keyed off the use_high parameter. */
16157 static void
16158 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
16160 tree arg0 = gimple_call_arg (stmt, 0);
16161 tree arg1 = gimple_call_arg (stmt, 1);
16162 tree lhs = gimple_call_lhs (stmt);
16163 tree lhs_type = TREE_TYPE (lhs);
16164 tree lhs_type_type = TREE_TYPE (lhs_type);
16165 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
16166 int midpoint = n_elts / 2;
16167 int offset = 0;
16169 if (use_high == 1)
16170 offset = midpoint;
16172 tree_vector_builder elts (lhs_type, VECTOR_CST_NELTS (arg0), 1);
16174 for (int i = 0; i < midpoint; i++)
16176 elts.safe_push (build_int_cst (lhs_type_type, offset + i));
16177 elts.safe_push (build_int_cst (lhs_type_type, offset + n_elts + i));
16180 tree permute = elts.build ();
16182 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
16183 gimple_set_location (g, gimple_location (stmt));
16184 gsi_replace (gsi, g, true);
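/* Editorial sketch, not part of the original file: for a V4SI merge
   (n_elts == 4, midpoint == 2) the loop above produces

       use_high == 0:  permute = { 0, 4, 1, 5 }
       use_high == 1:  permute = { 2, 6, 3, 7 }

   i.e. elements of arg0 and arg1 are interleaved pairwise, drawn from
   the low or high half of each input according to the offset.  */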
16187 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16188 a constant, use rs6000_fold_builtin.) */
16190 bool
16191 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16193 gimple *stmt = gsi_stmt (*gsi);
16194 tree fndecl = gimple_call_fndecl (stmt);
16195 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16196 enum rs6000_builtins fn_code
16197 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16198 tree arg0, arg1, lhs, temp;
16199 gimple *g;
16201 size_t uns_fncode = (size_t) fn_code;
16202 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16203 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16204 const char *fn_name2 = (icode != CODE_FOR_nothing)
16205 ? get_insn_name ((int) icode)
16206 : "nothing";
16208 if (TARGET_DEBUG_BUILTIN)
16209 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16210 fn_code, fn_name1, fn_name2);
16212 if (!rs6000_fold_gimple)
16213 return false;
16215 /* Prevent gimple folding for code that does not have a LHS, unless it is
16216 allowed per the rs6000_builtin_valid_without_lhs helper function. */
16217 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
16218 return false;
16220 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
16221 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
16222 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
16223 if (!func_valid_p)
16224 return false;
16226 switch (fn_code)
16227 {
16228 /* Flavors of vec_add. We deliberately don't expand
16229 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16230 TImode, resulting in much poorer code generation. */
16231 case ALTIVEC_BUILTIN_VADDUBM:
16232 case ALTIVEC_BUILTIN_VADDUHM:
16233 case ALTIVEC_BUILTIN_VADDUWM:
16234 case P8V_BUILTIN_VADDUDM:
16235 case ALTIVEC_BUILTIN_VADDFP:
16236 case VSX_BUILTIN_XVADDDP:
16237 arg0 = gimple_call_arg (stmt, 0);
16238 arg1 = gimple_call_arg (stmt, 1);
16239 lhs = gimple_call_lhs (stmt);
16240 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16241 gimple_set_location (g, gimple_location (stmt));
16242 gsi_replace (gsi, g, true);
16243 return true;
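/* [Editorial example.] After this fold a call such as

     r = vec_add (a, b);

   is represented simply as r = a + b (a vector PLUS_EXPR) in GIMPLE,
   so later passes can optimize it like any other addition.  */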
16244 /* Flavors of vec_sub. We deliberately don't expand
16245 P8V_BUILTIN_VSUBUQM. */
16246 case ALTIVEC_BUILTIN_VSUBUBM:
16247 case ALTIVEC_BUILTIN_VSUBUHM:
16248 case ALTIVEC_BUILTIN_VSUBUWM:
16249 case P8V_BUILTIN_VSUBUDM:
16250 case ALTIVEC_BUILTIN_VSUBFP:
16251 case VSX_BUILTIN_XVSUBDP:
16252 arg0 = gimple_call_arg (stmt, 0);
16253 arg1 = gimple_call_arg (stmt, 1);
16254 lhs = gimple_call_lhs (stmt);
16255 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16256 gimple_set_location (g, gimple_location (stmt));
16257 gsi_replace (gsi, g, true);
16258 return true;
16259 case VSX_BUILTIN_XVMULSP:
16260 case VSX_BUILTIN_XVMULDP:
16261 arg0 = gimple_call_arg (stmt, 0);
16262 arg1 = gimple_call_arg (stmt, 1);
16263 lhs = gimple_call_lhs (stmt);
16264 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16265 gimple_set_location (g, gimple_location (stmt));
16266 gsi_replace (gsi, g, true);
16267 return true;
16268 /* Even element flavors of vec_mul (signed). */
16269 case ALTIVEC_BUILTIN_VMULESB:
16270 case ALTIVEC_BUILTIN_VMULESH:
16271 case P8V_BUILTIN_VMULESW:
16272 /* Even element flavors of vec_mul (unsigned). */
16273 case ALTIVEC_BUILTIN_VMULEUB:
16274 case ALTIVEC_BUILTIN_VMULEUH:
16275 case P8V_BUILTIN_VMULEUW:
16276 arg0 = gimple_call_arg (stmt, 0);
16277 arg1 = gimple_call_arg (stmt, 1);
16278 lhs = gimple_call_lhs (stmt);
16279 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16280 gimple_set_location (g, gimple_location (stmt));
16281 gsi_replace (gsi, g, true);
16282 return true;
16283 /* Odd element flavors of vec_mul (signed). */
16284 case ALTIVEC_BUILTIN_VMULOSB:
16285 case ALTIVEC_BUILTIN_VMULOSH:
16286 case P8V_BUILTIN_VMULOSW:
16287 /* Odd element flavors of vec_mul (unsigned). */
16288 case ALTIVEC_BUILTIN_VMULOUB:
16289 case ALTIVEC_BUILTIN_VMULOUH:
16290 case P8V_BUILTIN_VMULOUW:
16291 arg0 = gimple_call_arg (stmt, 0);
16292 arg1 = gimple_call_arg (stmt, 1);
16293 lhs = gimple_call_lhs (stmt);
16294 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16295 gimple_set_location (g, gimple_location (stmt));
16296 gsi_replace (gsi, g, true);
16297 return true;
16298 /* Flavors of vec_div (Integer). */
16299 case VSX_BUILTIN_DIV_V2DI:
16300 case VSX_BUILTIN_UDIV_V2DI:
16301 arg0 = gimple_call_arg (stmt, 0);
16302 arg1 = gimple_call_arg (stmt, 1);
16303 lhs = gimple_call_lhs (stmt);
16304 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16305 gimple_set_location (g, gimple_location (stmt));
16306 gsi_replace (gsi, g, true);
16307 return true;
16308 /* Flavors of vec_div (Float). */
16309 case VSX_BUILTIN_XVDIVSP:
16310 case VSX_BUILTIN_XVDIVDP:
16311 arg0 = gimple_call_arg (stmt, 0);
16312 arg1 = gimple_call_arg (stmt, 1);
16313 lhs = gimple_call_lhs (stmt);
16314 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16315 gimple_set_location (g, gimple_location (stmt));
16316 gsi_replace (gsi, g, true);
16317 return true;
16318 /* Flavors of vec_and. */
16319 case ALTIVEC_BUILTIN_VAND:
16320 arg0 = gimple_call_arg (stmt, 0);
16321 arg1 = gimple_call_arg (stmt, 1);
16322 lhs = gimple_call_lhs (stmt);
16323 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16324 gimple_set_location (g, gimple_location (stmt));
16325 gsi_replace (gsi, g, true);
16326 return true;
16327 /* Flavors of vec_andc. */
16328 case ALTIVEC_BUILTIN_VANDC:
16329 arg0 = gimple_call_arg (stmt, 0);
16330 arg1 = gimple_call_arg (stmt, 1);
16331 lhs = gimple_call_lhs (stmt);
16332 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16333 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16334 gimple_set_location (g, gimple_location (stmt));
16335 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16336 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16337 gimple_set_location (g, gimple_location (stmt));
16338 gsi_replace (gsi, g, true);
16339 return true;
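/* [Editorial sketch.] vec_andc has no single tree code, so it is
   split into two statements:

     temp = ~arg1;
     lhs  = arg0 & temp;

   The nand, orc, nor and eqv cases below follow the same scheme,
   pairing BIT_NOT_EXPR with the matching binary bit operation.  */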
16340 /* Flavors of vec_nand. */
16341 case P8V_BUILTIN_VEC_NAND:
16342 case P8V_BUILTIN_NAND_V16QI:
16343 case P8V_BUILTIN_NAND_V8HI:
16344 case P8V_BUILTIN_NAND_V4SI:
16345 case P8V_BUILTIN_NAND_V4SF:
16346 case P8V_BUILTIN_NAND_V2DF:
16347 case P8V_BUILTIN_NAND_V2DI:
16348 arg0 = gimple_call_arg (stmt, 0);
16349 arg1 = gimple_call_arg (stmt, 1);
16350 lhs = gimple_call_lhs (stmt);
16351 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16352 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16353 gimple_set_location (g, gimple_location (stmt));
16354 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16355 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16356 gimple_set_location (g, gimple_location (stmt));
16357 gsi_replace (gsi, g, true);
16358 return true;
16359 /* Flavors of vec_or. */
16360 case ALTIVEC_BUILTIN_VOR:
16361 arg0 = gimple_call_arg (stmt, 0);
16362 arg1 = gimple_call_arg (stmt, 1);
16363 lhs = gimple_call_lhs (stmt);
16364 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16365 gimple_set_location (g, gimple_location (stmt));
16366 gsi_replace (gsi, g, true);
16367 return true;
16368 /* Flavors of vec_orc. */
16369 case P8V_BUILTIN_ORC_V16QI:
16370 case P8V_BUILTIN_ORC_V8HI:
16371 case P8V_BUILTIN_ORC_V4SI:
16372 case P8V_BUILTIN_ORC_V4SF:
16373 case P8V_BUILTIN_ORC_V2DF:
16374 case P8V_BUILTIN_ORC_V2DI:
16375 arg0 = gimple_call_arg (stmt, 0);
16376 arg1 = gimple_call_arg (stmt, 1);
16377 lhs = gimple_call_lhs (stmt);
16378 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16379 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16380 gimple_set_location (g, gimple_location (stmt));
16381 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16382 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16383 gimple_set_location (g, gimple_location (stmt));
16384 gsi_replace (gsi, g, true);
16385 return true;
16386 /* Flavors of vec_xor. */
16387 case ALTIVEC_BUILTIN_VXOR:
16388 arg0 = gimple_call_arg (stmt, 0);
16389 arg1 = gimple_call_arg (stmt, 1);
16390 lhs = gimple_call_lhs (stmt);
16391 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16392 gimple_set_location (g, gimple_location (stmt));
16393 gsi_replace (gsi, g, true);
16394 return true;
16395 /* Flavors of vec_nor. */
16396 case ALTIVEC_BUILTIN_VNOR:
16397 arg0 = gimple_call_arg (stmt, 0);
16398 arg1 = gimple_call_arg (stmt, 1);
16399 lhs = gimple_call_lhs (stmt);
16400 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16401 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16402 gimple_set_location (g, gimple_location (stmt));
16403 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16404 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16405 gimple_set_location (g, gimple_location (stmt));
16406 gsi_replace (gsi, g, true);
16407 return true;
16408 /* Flavors of vec_abs. */
16409 case ALTIVEC_BUILTIN_ABS_V16QI:
16410 case ALTIVEC_BUILTIN_ABS_V8HI:
16411 case ALTIVEC_BUILTIN_ABS_V4SI:
16412 case ALTIVEC_BUILTIN_ABS_V4SF:
16413 case P8V_BUILTIN_ABS_V2DI:
16414 case VSX_BUILTIN_XVABSDP:
16415 arg0 = gimple_call_arg (stmt, 0);
16416 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16417 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16418 return false;
16419 lhs = gimple_call_lhs (stmt);
16420 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16421 gimple_set_location (g, gimple_location (stmt));
16422 gsi_replace (gsi, g, true);
16423 return true;
16424 /* Flavors of vec_min. */
16425 case VSX_BUILTIN_XVMINDP:
16426 case P8V_BUILTIN_VMINSD:
16427 case P8V_BUILTIN_VMINUD:
16428 case ALTIVEC_BUILTIN_VMINSB:
16429 case ALTIVEC_BUILTIN_VMINSH:
16430 case ALTIVEC_BUILTIN_VMINSW:
16431 case ALTIVEC_BUILTIN_VMINUB:
16432 case ALTIVEC_BUILTIN_VMINUH:
16433 case ALTIVEC_BUILTIN_VMINUW:
16434 case ALTIVEC_BUILTIN_VMINFP:
16435 arg0 = gimple_call_arg (stmt, 0);
16436 arg1 = gimple_call_arg (stmt, 1);
16437 lhs = gimple_call_lhs (stmt);
16438 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16439 gimple_set_location (g, gimple_location (stmt));
16440 gsi_replace (gsi, g, true);
16441 return true;
16442 /* Flavors of vec_max. */
16443 case VSX_BUILTIN_XVMAXDP:
16444 case P8V_BUILTIN_VMAXSD:
16445 case P8V_BUILTIN_VMAXUD:
16446 case ALTIVEC_BUILTIN_VMAXSB:
16447 case ALTIVEC_BUILTIN_VMAXSH:
16448 case ALTIVEC_BUILTIN_VMAXSW:
16449 case ALTIVEC_BUILTIN_VMAXUB:
16450 case ALTIVEC_BUILTIN_VMAXUH:
16451 case ALTIVEC_BUILTIN_VMAXUW:
16452 case ALTIVEC_BUILTIN_VMAXFP:
16453 arg0 = gimple_call_arg (stmt, 0);
16454 arg1 = gimple_call_arg (stmt, 1);
16455 lhs = gimple_call_lhs (stmt);
16456 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16457 gimple_set_location (g, gimple_location (stmt));
16458 gsi_replace (gsi, g, true);
16459 return true;
16460 /* Flavors of vec_eqv. */
16461 case P8V_BUILTIN_EQV_V16QI:
16462 case P8V_BUILTIN_EQV_V8HI:
16463 case P8V_BUILTIN_EQV_V4SI:
16464 case P8V_BUILTIN_EQV_V4SF:
16465 case P8V_BUILTIN_EQV_V2DF:
16466 case P8V_BUILTIN_EQV_V2DI:
16467 arg0 = gimple_call_arg (stmt, 0);
16468 arg1 = gimple_call_arg (stmt, 1);
16469 lhs = gimple_call_lhs (stmt);
16470 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16471 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16472 gimple_set_location (g, gimple_location (stmt));
16473 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16474 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16475 gimple_set_location (g, gimple_location (stmt));
16476 gsi_replace (gsi, g, true);
16477 return true;
16478 /* Flavors of vec_rotate_left. */
16479 case ALTIVEC_BUILTIN_VRLB:
16480 case ALTIVEC_BUILTIN_VRLH:
16481 case ALTIVEC_BUILTIN_VRLW:
16482 case P8V_BUILTIN_VRLD:
16483 arg0 = gimple_call_arg (stmt, 0);
16484 arg1 = gimple_call_arg (stmt, 1);
16485 lhs = gimple_call_lhs (stmt);
16486 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16487 gimple_set_location (g, gimple_location (stmt));
16488 gsi_replace (gsi, g, true);
16489 return true;
16490 /* Flavors of vector shift right algebraic.
16491 vec_sra{b,h,w} -> vsra{b,h,w}. */
16492 case ALTIVEC_BUILTIN_VSRAB:
16493 case ALTIVEC_BUILTIN_VSRAH:
16494 case ALTIVEC_BUILTIN_VSRAW:
16495 case P8V_BUILTIN_VSRAD:
16496 arg0 = gimple_call_arg (stmt, 0);
16497 arg1 = gimple_call_arg (stmt, 1);
16498 lhs = gimple_call_lhs (stmt);
16499 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16500 gimple_set_location (g, gimple_location (stmt));
16501 gsi_replace (gsi, g, true);
16502 return true;
16503 /* Flavors of vector shift left.
16504 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16505 case ALTIVEC_BUILTIN_VSLB:
16506 case ALTIVEC_BUILTIN_VSLH:
16507 case ALTIVEC_BUILTIN_VSLW:
16508 case P8V_BUILTIN_VSLD:
16509 arg0 = gimple_call_arg (stmt, 0);
16510 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16511 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16512 return false;
16513 arg1 = gimple_call_arg (stmt, 1);
16514 lhs = gimple_call_lhs (stmt);
16515 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16516 gimple_set_location (g, gimple_location (stmt));
16517 gsi_replace (gsi, g, true);
16518 return true;
16519 /* Flavors of vector shift right. */
16520 case ALTIVEC_BUILTIN_VSRB:
16521 case ALTIVEC_BUILTIN_VSRH:
16522 case ALTIVEC_BUILTIN_VSRW:
16523 case P8V_BUILTIN_VSRD:
16524 {
16525 arg0 = gimple_call_arg (stmt, 0);
16526 arg1 = gimple_call_arg (stmt, 1);
16527 lhs = gimple_call_lhs (stmt);
16528 gimple_seq stmts = NULL;
16529 /* Convert arg0 to unsigned. */
16530 tree arg0_unsigned
16531 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16532 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16533 tree res
16534 = gimple_build (&stmts, RSHIFT_EXPR,
16535 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16536 /* Convert result back to the lhs type. */
16537 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16538 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16539 update_call_from_tree (gsi, res);
16540 return true;
16541 }
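/* [Editorial sketch.] RSHIFT_EXPR on a signed vector type would be an
   arithmetic (sign-filling) shift, but vsr{b,h,w,d} are logical
   shifts. Shifting an unsigned view of the operand, roughly

     lhs = (lhs_type) ((unsigned_vec_type) arg0 >> arg1);

   where unsigned_vec_type stands for unsigned_type_for (TREE_TYPE
   (arg0)), guarantees zero-fill regardless of element signedness.  */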
16542 /* Vector loads. */
16543 case ALTIVEC_BUILTIN_LVX_V16QI:
16544 case ALTIVEC_BUILTIN_LVX_V8HI:
16545 case ALTIVEC_BUILTIN_LVX_V4SI:
16546 case ALTIVEC_BUILTIN_LVX_V4SF:
16547 case ALTIVEC_BUILTIN_LVX_V2DI:
16548 case ALTIVEC_BUILTIN_LVX_V2DF:
16549 case ALTIVEC_BUILTIN_LVX_V1TI:
16550 {
16551 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
16552 arg1 = gimple_call_arg (stmt, 1); /* Address. */
16553 /* Do not fold for -maltivec=be on LE targets. */
16554 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16555 return false;
16556 lhs = gimple_call_lhs (stmt);
16557 location_t loc = gimple_location (stmt);
16558 /* Since arg1 may be cast to a different type, just use ptr_type_node
16559 here instead of trying to enforce TBAA on pointer types. */
16560 tree arg1_type = ptr_type_node;
16561 tree lhs_type = TREE_TYPE (lhs);
16562 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16563 the tree using the value from arg0. The resulting type will match
16564 the type of arg1. */
16565 gimple_seq stmts = NULL;
16566 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16567 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16568 arg1_type, arg1, temp_offset);
16569 /* Mask off any lower bits from the address. */
16570 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16571 arg1_type, temp_addr,
16572 build_int_cst (arg1_type, -16));
16573 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16574 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16575 take an offset, but since we've already incorporated the offset
16576 above, here we just pass in a zero. */
16577 gimple *g
16578 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
16579 build_int_cst (arg1_type, 0)));
16580 gimple_set_location (g, loc);
16581 gsi_replace (gsi, g, true);
16582 return true;
16583 }
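/* [Editorial sketch of the folded load, assuming 16-byte vectors.]
   The GIMPLE built above is roughly equivalent to:

     char *addr = (char *) arg1 + (sizetype) arg0;
     addr = (char *) ((uintptr_t) addr & -16);   // lvx ignores the low 4 bits
     lhs = *(lhs_type *) addr;

   The stvx folding below mirrors this address computation for the
   store direction.  */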
16584 /* Vector stores. */
16585 case ALTIVEC_BUILTIN_STVX_V16QI:
16586 case ALTIVEC_BUILTIN_STVX_V8HI:
16587 case ALTIVEC_BUILTIN_STVX_V4SI:
16588 case ALTIVEC_BUILTIN_STVX_V4SF:
16589 case ALTIVEC_BUILTIN_STVX_V2DI:
16590 case ALTIVEC_BUILTIN_STVX_V2DF:
16591 {
16592 /* Do not fold for -maltivec=be on LE targets. */
16593 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16594 return false;
16595 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16596 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16597 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16598 location_t loc = gimple_location (stmt);
16599 tree arg0_type = TREE_TYPE (arg0);
16600 /* Use ptr_type_node (no TBAA) for the arg2_type.
16601 FIXME: (Richard) "A proper fix would be to transition this type as
16602 seen from the frontend to GIMPLE, for example in a similar way we
16603 do for MEM_REFs by piggy-backing that on an extra argument, a
16604 constant zero pointer of the alias pointer type to use (which would
16605 also serve as a type indicator of the store itself). I'd use a
16606 target specific internal function for this (not sure if we can have
16607 those target specific, but I guess if it's folded away then that's
16608 fine) and get away with the overload set." */
16609 tree arg2_type = ptr_type_node;
16610 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16611 the tree using the value from arg1. The resulting type will match
16612 the type of arg2. */
16613 gimple_seq stmts = NULL;
16614 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16615 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16616 arg2_type, arg2, temp_offset);
16617 /* Mask off any lower bits from the address. */
16618 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16619 arg2_type, temp_addr,
16620 build_int_cst (arg2_type, -16));
16621 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16622 /* The desired gimple result should be similar to:
16623 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
16624 gimple *g
16625 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
16626 build_int_cst (arg2_type, 0)), arg0);
16627 gimple_set_location (g, loc);
16628 gsi_replace (gsi, g, true);
16629 return true;
16630 }
16632 /* Vector fused multiply-add (fma). */
16633 case ALTIVEC_BUILTIN_VMADDFP:
16634 case VSX_BUILTIN_XVMADDDP:
16635 case ALTIVEC_BUILTIN_VMLADDUHM:
16636 {
16637 arg0 = gimple_call_arg (stmt, 0);
16638 arg1 = gimple_call_arg (stmt, 1);
16639 tree arg2 = gimple_call_arg (stmt, 2);
16640 lhs = gimple_call_lhs (stmt);
16641 gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
16642 gimple_set_location (g, gimple_location (stmt));
16643 gsi_replace (gsi, g, true);
16644 return true;
16645 }
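/* [Editorial example.] vec_madd (a, b, c) becomes a single FMA_EXPR,
   i.e. lhs = a * b + c evaluated as one fused multiply-add, which the
   backend can expand to a vmaddfp / xvmadd style instruction.  */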
16647 /* Vector compares: EQ, NE, GE, GT, LE. */
16648 case ALTIVEC_BUILTIN_VCMPEQUB:
16649 case ALTIVEC_BUILTIN_VCMPEQUH:
16650 case ALTIVEC_BUILTIN_VCMPEQUW:
16651 case P8V_BUILTIN_VCMPEQUD:
16652 fold_compare_helper (gsi, EQ_EXPR, stmt);
16653 return true;
16655 case P9V_BUILTIN_CMPNEB:
16656 case P9V_BUILTIN_CMPNEH:
16657 case P9V_BUILTIN_CMPNEW:
16658 fold_compare_helper (gsi, NE_EXPR, stmt);
16659 return true;
16661 case VSX_BUILTIN_CMPGE_16QI:
16662 case VSX_BUILTIN_CMPGE_U16QI:
16663 case VSX_BUILTIN_CMPGE_8HI:
16664 case VSX_BUILTIN_CMPGE_U8HI:
16665 case VSX_BUILTIN_CMPGE_4SI:
16666 case VSX_BUILTIN_CMPGE_U4SI:
16667 case VSX_BUILTIN_CMPGE_2DI:
16668 case VSX_BUILTIN_CMPGE_U2DI:
16669 fold_compare_helper (gsi, GE_EXPR, stmt);
16670 return true;
16672 case ALTIVEC_BUILTIN_VCMPGTSB:
16673 case ALTIVEC_BUILTIN_VCMPGTUB:
16674 case ALTIVEC_BUILTIN_VCMPGTSH:
16675 case ALTIVEC_BUILTIN_VCMPGTUH:
16676 case ALTIVEC_BUILTIN_VCMPGTSW:
16677 case ALTIVEC_BUILTIN_VCMPGTUW:
16678 case P8V_BUILTIN_VCMPGTUD:
16679 case P8V_BUILTIN_VCMPGTSD:
16680 fold_compare_helper (gsi, GT_EXPR, stmt);
16681 return true;
16683 case VSX_BUILTIN_CMPLE_16QI:
16684 case VSX_BUILTIN_CMPLE_U16QI:
16685 case VSX_BUILTIN_CMPLE_8HI:
16686 case VSX_BUILTIN_CMPLE_U8HI:
16687 case VSX_BUILTIN_CMPLE_4SI:
16688 case VSX_BUILTIN_CMPLE_U4SI:
16689 case VSX_BUILTIN_CMPLE_2DI:
16690 case VSX_BUILTIN_CMPLE_U2DI:
16691 fold_compare_helper (gsi, LE_EXPR, stmt);
16692 return true;
16694 /* Flavors of vec_splat_[us]{8,16,32}. */
16695 case ALTIVEC_BUILTIN_VSPLTISB:
16696 case ALTIVEC_BUILTIN_VSPLTISH:
16697 case ALTIVEC_BUILTIN_VSPLTISW:
16698 {
16699 arg0 = gimple_call_arg (stmt, 0);
16700 lhs = gimple_call_lhs (stmt);
16701 /* Only fold the vec_splat_*() if arg0 is constant. */
16702 if (TREE_CODE (arg0) != INTEGER_CST)
16703 return false;
16704 gimple_seq stmts = NULL;
16705 location_t loc = gimple_location (stmt);
16706 tree splat_value = gimple_convert (&stmts, loc,
16707 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16708 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16709 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16710 g = gimple_build_assign (lhs, splat_tree);
16711 gimple_set_location (g, gimple_location (stmt));
16712 gsi_replace (gsi, g, true);
16713 return true;
16714 }
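/* [Editorial example.] With a constant argument,

     v = vec_splat_s32 (5);

   folds directly to the vector constant { 5, 5, 5, 5 }; a non-constant
   argument is left for rs6000_expand_builtin to handle.  */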
16716 /* vec_mergel (integrals). */
16717 case ALTIVEC_BUILTIN_VMRGLH:
16718 case ALTIVEC_BUILTIN_VMRGLW:
16719 case VSX_BUILTIN_XXMRGLW_4SI:
16720 case ALTIVEC_BUILTIN_VMRGLB:
16721 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16722 /* Do not fold for -maltivec=be on LE targets. */
16723 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16724 return false;
16725 fold_mergehl_helper (gsi, stmt, 1);
16726 return true;
16727 /* vec_mergeh (integrals). */
16728 case ALTIVEC_BUILTIN_VMRGHH:
16729 case ALTIVEC_BUILTIN_VMRGHW:
16730 case VSX_BUILTIN_XXMRGHW_4SI:
16731 case ALTIVEC_BUILTIN_VMRGHB:
16732 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16733 /* Do not fold for -maltivec=be on LE targets. */
16734 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16735 return false;
16736 fold_mergehl_helper (gsi, stmt, 0);
16737 return true;
16738 default:
16739 if (TARGET_DEBUG_BUILTIN)
16740 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16741 fn_code, fn_name1, fn_name2);
16742 break;
16743 }
16745 return false;
16746 }
16748 /* Expand an expression EXP that calls a built-in function,
16749 with result going to TARGET if that's convenient
16750 (and in mode MODE if that's convenient).
16751 SUBTARGET may be used as the target for computing one of EXP's operands.
16752 IGNORE is nonzero if the value is to be ignored. */
16754 static rtx
16755 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16756 machine_mode mode ATTRIBUTE_UNUSED,
16757 int ignore ATTRIBUTE_UNUSED)
16758 {
16759 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16760 enum rs6000_builtins fcode
16761 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16762 size_t uns_fcode = (size_t)fcode;
16763 const struct builtin_description *d;
16764 size_t i;
16765 rtx ret;
16766 bool success;
16767 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16768 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16769 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16771 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16772 floating point type, depending on whether long double is the IBM extended
16773 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16774 we only define one variant of the built-in function, and switch the code
16775 when defining it, rather than defining two built-ins and using the
16776 overload table in rs6000-c.c to switch between the two. If we don't have
16777 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16778 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16779 #ifdef HAVE_AS_POWER9
16780 if (FLOAT128_IEEE_P (TFmode))
16781 switch (icode)
16782 {
16783 default:
16784 break;
16786 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16787 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16788 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16789 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16790 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16791 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16792 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16793 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16794 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16795 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16796 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16797 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16798 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16799 }
16800 #endif
16802 if (TARGET_DEBUG_BUILTIN)
16803 {
16804 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16805 const char *name2 = (icode != CODE_FOR_nothing)
16806 ? get_insn_name ((int) icode)
16807 : "nothing";
16808 const char *name3;
16810 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16811 {
16812 default: name3 = "unknown"; break;
16813 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16814 case RS6000_BTC_UNARY: name3 = "unary"; break;
16815 case RS6000_BTC_BINARY: name3 = "binary"; break;
16816 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16817 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16818 case RS6000_BTC_ABS: name3 = "abs"; break;
16819 case RS6000_BTC_DST: name3 = "dst"; break;
16820 }
16823 fprintf (stderr,
16824 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16825 (name1) ? name1 : "---", fcode,
16826 (name2) ? name2 : "---", (int) icode,
16827 name3,
16828 func_valid_p ? "" : ", not valid");
16829 }
16831 if (!func_valid_p)
16832 {
16833 rs6000_invalid_builtin (fcode);
16835 /* Given it is invalid, just generate a normal call. */
16836 return expand_call (exp, target, ignore);
16837 }
16839 switch (fcode)
16840 {
16841 case RS6000_BUILTIN_RECIP:
16842 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16844 case RS6000_BUILTIN_RECIPF:
16845 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16847 case RS6000_BUILTIN_RSQRTF:
16848 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16850 case RS6000_BUILTIN_RSQRT:
16851 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16853 case POWER7_BUILTIN_BPERMD:
16854 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16855 ? CODE_FOR_bpermd_di
16856 : CODE_FOR_bpermd_si), exp, target);
16858 case RS6000_BUILTIN_GET_TB:
16859 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16860 target);
16862 case RS6000_BUILTIN_MFTB:
16863 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16864 ? CODE_FOR_rs6000_mftb_di
16865 : CODE_FOR_rs6000_mftb_si),
16866 target);
16868 case RS6000_BUILTIN_MFFS:
16869 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16871 case RS6000_BUILTIN_MTFSF:
16872 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16874 case RS6000_BUILTIN_CPU_INIT:
16875 case RS6000_BUILTIN_CPU_IS:
16876 case RS6000_BUILTIN_CPU_SUPPORTS:
16877 return cpu_expand_builtin (fcode, exp, target);
16879 case MISC_BUILTIN_SPEC_BARRIER:
16880 {
16881 emit_insn (gen_rs6000_speculation_barrier ());
16882 return NULL_RTX;
16883 }
16885 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16886 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16887 {
16888 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16889 : (int) CODE_FOR_altivec_lvsl_direct);
16890 machine_mode tmode = insn_data[icode2].operand[0].mode;
16891 machine_mode mode = insn_data[icode2].operand[1].mode;
16892 tree arg;
16893 rtx op, addr, pat;
16895 gcc_assert (TARGET_ALTIVEC);
16897 arg = CALL_EXPR_ARG (exp, 0);
16898 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16899 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16900 addr = memory_address (mode, op);
16901 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16902 op = addr;
16903 else
16904 {
16905 /* For the load case we need to negate the address. */
16906 op = gen_reg_rtx (GET_MODE (addr));
16907 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16908 }
16909 op = gen_rtx_MEM (mode, op);
16911 if (target == 0
16912 || GET_MODE (target) != tmode
16913 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16914 target = gen_reg_rtx (tmode);
16916 pat = GEN_FCN (icode2) (target, op);
16917 if (!pat)
16918 return 0;
16919 emit_insn (pat);
16921 return target;
16922 }
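/* [Editorial note.] The mask_for_load case above derives its
   permute-control vector from the negated address, so on a big-endian
   target the result is in effect

     mask = lvsr (0, -addr);

   (lvsl on little-endian), keying the realignment mask off
   (16 - addr) mod 16, whereas mask_for_store uses addr directly.  */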
16924 case ALTIVEC_BUILTIN_VCFUX:
16925 case ALTIVEC_BUILTIN_VCFSX:
16926 case ALTIVEC_BUILTIN_VCTUXS:
16927 case ALTIVEC_BUILTIN_VCTSXS:
16928 /* FIXME: There's got to be a nicer way to handle this case than
16929 constructing a new CALL_EXPR. */
16930 if (call_expr_nargs (exp) == 1)
16931 {
16932 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16933 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16934 }
16935 break;
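/* [Editorial example.] The one-argument overload vec_ctf (v) is
   rewritten here as if the user had written vec_ctf (v, 0), i.e. with
   a scale factor of zero, so the expanders only ever see the
   two-argument form.  */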
16937 default:
16938 break;
16939 }
16941 if (TARGET_ALTIVEC)
16942 {
16943 ret = altivec_expand_builtin (exp, target, &success);
16945 if (success)
16946 return ret;
16947 }
16948 if (TARGET_PAIRED_FLOAT)
16949 {
16950 ret = paired_expand_builtin (exp, target, &success);
16952 if (success)
16953 return ret;
16954 }
16955 if (TARGET_HTM)
16956 {
16957 ret = htm_expand_builtin (exp, target, &success);
16959 if (success)
16960 return ret;
16961 }
16963 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16964 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16965 gcc_assert (attr == RS6000_BTC_UNARY
16966 || attr == RS6000_BTC_BINARY
16967 || attr == RS6000_BTC_TERNARY
16968 || attr == RS6000_BTC_SPECIAL);
16970 /* Handle simple unary operations. */
16971 d = bdesc_1arg;
16972 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16973 if (d->code == fcode)
16974 return rs6000_expand_unop_builtin (icode, exp, target);
16976 /* Handle simple binary operations. */
16977 d = bdesc_2arg;
16978 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16979 if (d->code == fcode)
16980 return rs6000_expand_binop_builtin (icode, exp, target);
16982 /* Handle simple ternary operations. */
16983 d = bdesc_3arg;
16984 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16985 if (d->code == fcode)
16986 return rs6000_expand_ternop_builtin (icode, exp, target);
16988 /* Handle simple no-argument operations. */
16989 d = bdesc_0arg;
16990 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16991 if (d->code == fcode)
16992 return rs6000_expand_zeroop_builtin (icode, target);
16994 gcc_unreachable ();
16995 }
16997 /* Create a builtin vector type with a name, taking care not to give
16998 the canonical type a name. */
17000 static tree
17001 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
17002 {
17003 tree result = build_vector_type (elt_type, num_elts);
17005 /* Copy so we don't give the canonical type a name. */
17006 result = build_variant_type_copy (result);
17008 add_builtin_type (name, result);
17010 return result;
17011 }
17013 static void
17014 rs6000_init_builtins (void)
17015 {
17016 tree tdecl;
17017 tree ftype;
17018 machine_mode mode;
17020 if (TARGET_DEBUG_BUILTIN)
17021 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
17022 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
17023 (TARGET_ALTIVEC) ? ", altivec" : "",
17024 (TARGET_VSX) ? ", vsx" : "");
17026 V2SI_type_node = build_vector_type (intSI_type_node, 2);
17027 V2SF_type_node = build_vector_type (float_type_node, 2);
17028 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
17029 : "__vector long long",
17030 intDI_type_node, 2);
17031 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
17032 V4SI_type_node = rs6000_vector_type ("__vector signed int",
17033 intSI_type_node, 4);
17034 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
17035 V8HI_type_node = rs6000_vector_type ("__vector signed short",
17036 intHI_type_node, 8);
17037 V16QI_type_node = rs6000_vector_type ("__vector signed char",
17038 intQI_type_node, 16);
17040 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
17041 unsigned_intQI_type_node, 16);
17042 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
17043 unsigned_intHI_type_node, 8);
17044 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
17045 unsigned_intSI_type_node, 4);
17046 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17047 ? "__vector unsigned long"
17048 : "__vector unsigned long long",
17049 unsigned_intDI_type_node, 2);
17051 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
17052 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
17053 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
17054 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
17056 const_str_type_node
17057 = build_pointer_type (build_qualified_type (char_type_node,
17058 TYPE_QUAL_CONST));
17060 /* We use V1TI mode as a special container to hold __int128_t items that
17061 must live in VSX registers. */
17062 if (intTI_type_node)
17063 {
17064 V1TI_type_node = rs6000_vector_type ("__vector __int128",
17065 intTI_type_node, 1);
17066 unsigned_V1TI_type_node
17067 = rs6000_vector_type ("__vector unsigned __int128",
17068 unsigned_intTI_type_node, 2);
17069 }
17071 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
17072 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
17073 'vector unsigned short'. */
17075 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
17076 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17077 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
17078 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
17079 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17081 long_integer_type_internal_node = long_integer_type_node;
17082 long_unsigned_type_internal_node = long_unsigned_type_node;
17083 long_long_integer_type_internal_node = long_long_integer_type_node;
17084 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
17085 intQI_type_internal_node = intQI_type_node;
17086 uintQI_type_internal_node = unsigned_intQI_type_node;
17087 intHI_type_internal_node = intHI_type_node;
17088 uintHI_type_internal_node = unsigned_intHI_type_node;
17089 intSI_type_internal_node = intSI_type_node;
17090 uintSI_type_internal_node = unsigned_intSI_type_node;
17091 intDI_type_internal_node = intDI_type_node;
17092 uintDI_type_internal_node = unsigned_intDI_type_node;
17093 intTI_type_internal_node = intTI_type_node;
17094 uintTI_type_internal_node = unsigned_intTI_type_node;
17095 float_type_internal_node = float_type_node;
17096 double_type_internal_node = double_type_node;
17097 long_double_type_internal_node = long_double_type_node;
17098 dfloat64_type_internal_node = dfloat64_type_node;
17099 dfloat128_type_internal_node = dfloat128_type_node;
17100 void_type_internal_node = void_type_node;
17102 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
17103 IFmode is the IBM extended 128-bit format that is a pair of doubles.
17104 TFmode will be either IEEE 128-bit floating point or the IBM double-double
17105 format that uses a pair of doubles, depending on the switches and
17106 defaults.
17108 If we don't support either the 128-bit IBM double-double format or
17109 IEEE 128-bit floating point, we need to make sure the type is non-zero
17110 or else the self-test fails during bootstrap.
17112 We don't register a built-in type for __ibm128 if the type is the same as
17113 long double. Instead rs6000_cpu_cpp_builtins adds a #define mapping
17114 __ibm128 to long double.
17116 For IEEE 128-bit floating point, always create the type __ieee128. If the
17117 user used -mfloat128, rs6000-c.c will create a define from __float128 to
17118 __ieee128. */
17119 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17120 {
17121 ibm128_float_type_node = make_node (REAL_TYPE);
17122 TYPE_PRECISION (ibm128_float_type_node) = 128;
17123 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17124 layout_type (ibm128_float_type_node);
17126 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17127 "__ibm128");
17129 else
17130 ibm128_float_type_node = long_double_type_node;
17132 if (TARGET_FLOAT128_TYPE)
17133 {
17134 ieee128_float_type_node = float128_type_node;
17135 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17136 "__ieee128");
17139 else
17140 ieee128_float_type_node = long_double_type_node;
17142 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17143 tree type node. */
17144 builtin_mode_to_type[QImode][0] = integer_type_node;
17145 builtin_mode_to_type[HImode][0] = integer_type_node;
17146 builtin_mode_to_type[SImode][0] = intSI_type_node;
17147 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17148 builtin_mode_to_type[DImode][0] = intDI_type_node;
17149 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17150 builtin_mode_to_type[TImode][0] = intTI_type_node;
17151 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17152 builtin_mode_to_type[SFmode][0] = float_type_node;
17153 builtin_mode_to_type[DFmode][0] = double_type_node;
17154 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17155 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17156 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17157 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17158 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17159 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17160 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17161 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17162 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17163 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17164 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17165 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17166 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17167 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17168 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17169 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17170 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17171 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17172 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
17174 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17175 TYPE_NAME (bool_char_type_node) = tdecl;
17177 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17178 TYPE_NAME (bool_short_type_node) = tdecl;
17180 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17181 TYPE_NAME (bool_int_type_node) = tdecl;
17183 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17184 TYPE_NAME (pixel_type_node) = tdecl;
17186 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17187 bool_char_type_node, 16);
17188 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17189 bool_short_type_node, 8);
17190 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17191 bool_int_type_node, 4);
17192 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17193 ? "__vector __bool long"
17194 : "__vector __bool long long",
17195 bool_long_type_node, 2);
17196 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17197 pixel_type_node, 8);
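/* [Editorial example.] Keeping the bool types distinct means that,
   e.g. in C++,

     vector bool int m = vec_cmpgt (a, b);
     vector unsigned int u = (vector unsigned int) m;

   requires the explicit cast; m and u do not unify even though both
   are 128-bit vectors of 32-bit elements.  */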
17199 /* Paired builtins are only available if the compiler was built with the
17200 appropriate options, so only create them in that case. Create the
17201 Altivec and VSX builtins on machines with at least the general purpose
17202 extensions (970 and newer) to allow the use of the target attribute. */
17204 if (TARGET_PAIRED_FLOAT)
17205 paired_init_builtins ();
17206 if (TARGET_EXTRA_BUILTINS)
17207 altivec_init_builtins ();
17208 if (TARGET_HTM)
17209 htm_init_builtins ();
17211 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17212 rs6000_common_init_builtins ();
17214 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17215 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17216 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17218 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17219 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17220 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17222 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17223 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17224 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17226 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17227 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17228 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17230 mode = (TARGET_64BIT) ? DImode : SImode;
17231 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17232 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17233 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17235 ftype = build_function_type_list (unsigned_intDI_type_node,
17236 NULL_TREE);
17237 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17239 if (TARGET_64BIT)
17240 ftype = build_function_type_list (unsigned_intDI_type_node,
17241 NULL_TREE);
17242 else
17243 ftype = build_function_type_list (unsigned_intSI_type_node,
17244 NULL_TREE);
17245 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17247 ftype = build_function_type_list (double_type_node, NULL_TREE);
17248 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17250 ftype = build_function_type_list (void_type_node,
17251 intSI_type_node, double_type_node,
17252 NULL_TREE);
17253 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17255 ftype = build_function_type_list (void_type_node, NULL_TREE);
17256 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17257 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
17258 MISC_BUILTIN_SPEC_BARRIER);
17260 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17261 NULL_TREE);
17262 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17263 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17265 /* AIX libm provides clog as __clog. */
17266 if (TARGET_XCOFF
17267 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17268 set_user_assembler_name (tdecl, "__clog");
17270 #ifdef SUBTARGET_INIT_BUILTINS
17271 SUBTARGET_INIT_BUILTINS;
17272 #endif
17273 }
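/* [Editorial usage sketch.] Some of the builtins defined above, as
   seen from user code:

     double q = __builtin_recipdiv (x, y);   // x / y via reciprocal estimate
     double r = __builtin_rsqrt (x);         // approximates 1.0 / sqrt (x)
     unsigned long long tb = __builtin_ppc_get_timebase ();

   Each expands through the CODE_FOR_* pattern selected for it in
   rs6000_expand_builtin.  */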
17275 /* Returns the rs6000 builtin decl for CODE. */
17277 static tree
17278 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17279 {
17280 HOST_WIDE_INT fnmask;
17282 if (code >= RS6000_BUILTIN_COUNT)
17283 return error_mark_node;
17285 fnmask = rs6000_builtin_info[code].mask;
17286 if ((fnmask & rs6000_builtin_mask) != fnmask)
17287 {
17288 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17289 return error_mark_node;
17290 }
17292 return rs6000_builtin_decls[code];
17293 }
17295 static void
17296 paired_init_builtins (void)
17297 {
17298 const struct builtin_description *d;
17299 size_t i;
17300 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17302 tree int_ftype_int_v2sf_v2sf
17303 = build_function_type_list (integer_type_node,
17304 integer_type_node,
17305 V2SF_type_node,
17306 V2SF_type_node,
17307 NULL_TREE);
17308 tree pcfloat_type_node =
17309 build_pointer_type (build_qualified_type
17310 (float_type_node, TYPE_QUAL_CONST));
17312 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17313 long_integer_type_node,
17314 pcfloat_type_node,
17315 NULL_TREE);
17316 tree void_ftype_v2sf_long_pcfloat =
17317 build_function_type_list (void_type_node,
17318 V2SF_type_node,
17319 long_integer_type_node,
17320 pcfloat_type_node,
17321 NULL_TREE);
17324 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17325 PAIRED_BUILTIN_LX);
17328 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17329 PAIRED_BUILTIN_STX);
17331 /* Predicates. */
17332 d = bdesc_paired_preds;
17333 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17334 {
17335 tree type;
17336 HOST_WIDE_INT mask = d->mask;
17338 if ((mask & builtin_mask) != mask)
17339 {
17340 if (TARGET_DEBUG_BUILTIN)
17341 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17342 d->name);
17343 continue;
17344 }
17346 /* Cannot define builtin if the instruction is disabled. */
17347 gcc_assert (d->icode != CODE_FOR_nothing);
17349 if (TARGET_DEBUG_BUILTIN)
17350 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17351 (int)i, get_insn_name (d->icode), (int)d->icode,
17352 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17354 switch (insn_data[d->icode].operand[1].mode)
17355 {
17356 case E_V2SFmode:
17357 type = int_ftype_int_v2sf_v2sf;
17358 break;
17359 default:
17360 gcc_unreachable ();
17361 }
17363 def_builtin (d->name, type, d->code);
17364 }
17365 }
17367 static void
17368 altivec_init_builtins (void)
17369 {
17370 const struct builtin_description *d;
17371 size_t i;
17372 tree ftype;
17373 tree decl;
17374 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17376 tree pvoid_type_node = build_pointer_type (void_type_node);
17378 tree pcvoid_type_node
17379 = build_pointer_type (build_qualified_type (void_type_node,
17380 TYPE_QUAL_CONST));
17382 tree int_ftype_opaque
17383 = build_function_type_list (integer_type_node,
17384 opaque_V4SI_type_node, NULL_TREE);
17385 tree opaque_ftype_opaque
17386 = build_function_type_list (integer_type_node, NULL_TREE);
17387 tree opaque_ftype_opaque_int
17388 = build_function_type_list (opaque_V4SI_type_node,
17389 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17390 tree opaque_ftype_opaque_opaque_int
17391 = build_function_type_list (opaque_V4SI_type_node,
17392 opaque_V4SI_type_node, opaque_V4SI_type_node,
17393 integer_type_node, NULL_TREE);
17394 tree opaque_ftype_opaque_opaque_opaque
17395 = build_function_type_list (opaque_V4SI_type_node,
17396 opaque_V4SI_type_node, opaque_V4SI_type_node,
17397 opaque_V4SI_type_node, NULL_TREE);
17398 tree opaque_ftype_opaque_opaque
17399 = build_function_type_list (opaque_V4SI_type_node,
17400 opaque_V4SI_type_node, opaque_V4SI_type_node,
17401 NULL_TREE);
17402 tree int_ftype_int_opaque_opaque
17403 = build_function_type_list (integer_type_node,
17404 integer_type_node, opaque_V4SI_type_node,
17405 opaque_V4SI_type_node, NULL_TREE);
17406 tree int_ftype_int_v4si_v4si
17407 = build_function_type_list (integer_type_node,
17408 integer_type_node, V4SI_type_node,
17409 V4SI_type_node, NULL_TREE);
17410 tree int_ftype_int_v2di_v2di
17411 = build_function_type_list (integer_type_node,
17412 integer_type_node, V2DI_type_node,
17413 V2DI_type_node, NULL_TREE);
17414 tree void_ftype_v4si
17415 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17416 tree v8hi_ftype_void
17417 = build_function_type_list (V8HI_type_node, NULL_TREE);
17418 tree void_ftype_void
17419 = build_function_type_list (void_type_node, NULL_TREE);
17420 tree void_ftype_int
17421 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17423 tree opaque_ftype_long_pcvoid
17424 = build_function_type_list (opaque_V4SI_type_node,
17425 long_integer_type_node, pcvoid_type_node,
17426 NULL_TREE);
17427 tree v16qi_ftype_long_pcvoid
17428 = build_function_type_list (V16QI_type_node,
17429 long_integer_type_node, pcvoid_type_node,
17430 NULL_TREE);
17431 tree v8hi_ftype_long_pcvoid
17432 = build_function_type_list (V8HI_type_node,
17433 long_integer_type_node, pcvoid_type_node,
17434 NULL_TREE);
17435 tree v4si_ftype_long_pcvoid
17436 = build_function_type_list (V4SI_type_node,
17437 long_integer_type_node, pcvoid_type_node,
17438 NULL_TREE);
17439 tree v4sf_ftype_long_pcvoid
17440 = build_function_type_list (V4SF_type_node,
17441 long_integer_type_node, pcvoid_type_node,
17442 NULL_TREE);
17443 tree v2df_ftype_long_pcvoid
17444 = build_function_type_list (V2DF_type_node,
17445 long_integer_type_node, pcvoid_type_node,
17446 NULL_TREE);
17447 tree v2di_ftype_long_pcvoid
17448 = build_function_type_list (V2DI_type_node,
17449 long_integer_type_node, pcvoid_type_node,
17450 NULL_TREE);
17451 tree v1ti_ftype_long_pcvoid
17452 = build_function_type_list (V1TI_type_node,
17453 long_integer_type_node, pcvoid_type_node,
17454 NULL_TREE);
17456 tree void_ftype_opaque_long_pvoid
17457 = build_function_type_list (void_type_node,
17458 opaque_V4SI_type_node, long_integer_type_node,
17459 pvoid_type_node, NULL_TREE);
17460 tree void_ftype_v4si_long_pvoid
17461 = build_function_type_list (void_type_node,
17462 V4SI_type_node, long_integer_type_node,
17463 pvoid_type_node, NULL_TREE);
17464 tree void_ftype_v16qi_long_pvoid
17465 = build_function_type_list (void_type_node,
17466 V16QI_type_node, long_integer_type_node,
17467 pvoid_type_node, NULL_TREE);
17469 tree void_ftype_v16qi_pvoid_long
17470 = build_function_type_list (void_type_node,
17471 V16QI_type_node, pvoid_type_node,
17472 long_integer_type_node, NULL_TREE);
17474 tree void_ftype_v8hi_long_pvoid
17475 = build_function_type_list (void_type_node,
17476 V8HI_type_node, long_integer_type_node,
17477 pvoid_type_node, NULL_TREE);
17478 tree void_ftype_v4sf_long_pvoid
17479 = build_function_type_list (void_type_node,
17480 V4SF_type_node, long_integer_type_node,
17481 pvoid_type_node, NULL_TREE);
17482 tree void_ftype_v2df_long_pvoid
17483 = build_function_type_list (void_type_node,
17484 V2DF_type_node, long_integer_type_node,
17485 pvoid_type_node, NULL_TREE);
17486 tree void_ftype_v1ti_long_pvoid
17487 = build_function_type_list (void_type_node,
17488 V1TI_type_node, long_integer_type_node,
17489 pvoid_type_node, NULL_TREE);
17490 tree void_ftype_v2di_long_pvoid
17491 = build_function_type_list (void_type_node,
17492 V2DI_type_node, long_integer_type_node,
17493 pvoid_type_node, NULL_TREE);
17494 tree int_ftype_int_v8hi_v8hi
17495 = build_function_type_list (integer_type_node,
17496 integer_type_node, V8HI_type_node,
17497 V8HI_type_node, NULL_TREE);
17498 tree int_ftype_int_v16qi_v16qi
17499 = build_function_type_list (integer_type_node,
17500 integer_type_node, V16QI_type_node,
17501 V16QI_type_node, NULL_TREE);
17502 tree int_ftype_int_v4sf_v4sf
17503 = build_function_type_list (integer_type_node,
17504 integer_type_node, V4SF_type_node,
17505 V4SF_type_node, NULL_TREE);
17506 tree int_ftype_int_v2df_v2df
17507 = build_function_type_list (integer_type_node,
17508 integer_type_node, V2DF_type_node,
17509 V2DF_type_node, NULL_TREE);
17510 tree v2di_ftype_v2di
17511 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17512 tree v4si_ftype_v4si
17513 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17514 tree v8hi_ftype_v8hi
17515 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17516 tree v16qi_ftype_v16qi
17517 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17518 tree v4sf_ftype_v4sf
17519 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17520 tree v2df_ftype_v2df
17521 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17522 tree void_ftype_pcvoid_int_int
17523 = build_function_type_list (void_type_node,
17524 pcvoid_type_node, integer_type_node,
17525 integer_type_node, NULL_TREE);
17527 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17528 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17529 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17530 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17531 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17532 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17533 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17534 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17535 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17536 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17537 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17538 ALTIVEC_BUILTIN_LVXL_V2DF);
17539 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17540 ALTIVEC_BUILTIN_LVXL_V2DI);
17541 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17542 ALTIVEC_BUILTIN_LVXL_V4SF);
17543 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17544 ALTIVEC_BUILTIN_LVXL_V4SI);
17545 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17546 ALTIVEC_BUILTIN_LVXL_V8HI);
17547 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17548 ALTIVEC_BUILTIN_LVXL_V16QI);
17549 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17550 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17551 ALTIVEC_BUILTIN_LVX_V1TI);
17552 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17553 ALTIVEC_BUILTIN_LVX_V2DF);
17554 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17555 ALTIVEC_BUILTIN_LVX_V2DI);
17556 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17557 ALTIVEC_BUILTIN_LVX_V4SF);
17558 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17559 ALTIVEC_BUILTIN_LVX_V4SI);
17560 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17561 ALTIVEC_BUILTIN_LVX_V8HI);
17562 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17563 ALTIVEC_BUILTIN_LVX_V16QI);
17564 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17565 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17566 ALTIVEC_BUILTIN_STVX_V2DF);
17567 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17568 ALTIVEC_BUILTIN_STVX_V2DI);
17569 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17570 ALTIVEC_BUILTIN_STVX_V4SF);
17571 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17572 ALTIVEC_BUILTIN_STVX_V4SI);
17573 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17574 ALTIVEC_BUILTIN_STVX_V8HI);
17575 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17576 ALTIVEC_BUILTIN_STVX_V16QI);
17577 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17578 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17579 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17580 ALTIVEC_BUILTIN_STVXL_V2DF);
17581 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17582 ALTIVEC_BUILTIN_STVXL_V2DI);
17583 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17584 ALTIVEC_BUILTIN_STVXL_V4SF);
17585 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17586 ALTIVEC_BUILTIN_STVXL_V4SI);
17587 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17588 ALTIVEC_BUILTIN_STVXL_V8HI);
17589 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17590 ALTIVEC_BUILTIN_STVXL_V16QI);
17591 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17592 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17593 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17594 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17595 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17596 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17597 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17598 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17599 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17600 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17601 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17602 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17603 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17604 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17605 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17606 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17608 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17609 VSX_BUILTIN_LXVD2X_V2DF);
17610 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17611 VSX_BUILTIN_LXVD2X_V2DI);
17612 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17613 VSX_BUILTIN_LXVW4X_V4SF);
17614 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17615 VSX_BUILTIN_LXVW4X_V4SI);
17616 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17617 VSX_BUILTIN_LXVW4X_V8HI);
17618 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17619 VSX_BUILTIN_LXVW4X_V16QI);
17620 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17621 VSX_BUILTIN_STXVD2X_V2DF);
17622 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17623 VSX_BUILTIN_STXVD2X_V2DI);
17624 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17625 VSX_BUILTIN_STXVW4X_V4SF);
17626 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17627 VSX_BUILTIN_STXVW4X_V4SI);
17628 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17629 VSX_BUILTIN_STXVW4X_V8HI);
17630 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17631 VSX_BUILTIN_STXVW4X_V16QI);
17633 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17634 VSX_BUILTIN_LD_ELEMREV_V2DF);
17635 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17636 VSX_BUILTIN_LD_ELEMREV_V2DI);
17637 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17638 VSX_BUILTIN_LD_ELEMREV_V4SF);
17639 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17640 VSX_BUILTIN_LD_ELEMREV_V4SI);
17641 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17642 VSX_BUILTIN_LD_ELEMREV_V8HI);
17643 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17644 VSX_BUILTIN_LD_ELEMREV_V16QI);
17645 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17646 VSX_BUILTIN_ST_ELEMREV_V2DF);
17647 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17648 VSX_BUILTIN_ST_ELEMREV_V1TI);
17649 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17650 VSX_BUILTIN_ST_ELEMREV_V2DI);
17651 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17652 VSX_BUILTIN_ST_ELEMREV_V4SF);
17653 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17654 VSX_BUILTIN_ST_ELEMREV_V4SI);
17655 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17656 VSX_BUILTIN_ST_ELEMREV_V8HI);
17657 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17658 VSX_BUILTIN_ST_ELEMREV_V16QI);
17660 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17661 VSX_BUILTIN_VEC_LD);
17662 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17663 VSX_BUILTIN_VEC_ST);
17664 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17665 VSX_BUILTIN_VEC_XL);
17666 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17667 VSX_BUILTIN_VEC_XL_BE);
17668 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17669 VSX_BUILTIN_VEC_XST);
17670 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17671 VSX_BUILTIN_VEC_XST_BE);
17673 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17674 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17675 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17677 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17678 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17679 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17680 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17681 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17682 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17683 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17684 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17685 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17686 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17687 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17688 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17690 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17691 ALTIVEC_BUILTIN_VEC_ADDE);
17692 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17693 ALTIVEC_BUILTIN_VEC_ADDEC);
17694 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17695 ALTIVEC_BUILTIN_VEC_CMPNE);
17696 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17697 ALTIVEC_BUILTIN_VEC_MUL);
17698 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17699 ALTIVEC_BUILTIN_VEC_SUBE);
17700 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17701 ALTIVEC_BUILTIN_VEC_SUBEC);
17703 /* Cell builtins. */
17704 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17705 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17706 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17707 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17709 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17710 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17711 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17712 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17714 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17715 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17716 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17717 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17719 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17720 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17721 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17722 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
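/* Illustrative usage sketch (not from the original source): every load
   builtin above takes the (long offset, const pointer) pair of the
   underlying AltiVec indexed addressing mode, and every store builtin
   additionally takes the vector to store as its first argument:

     v = __builtin_vec_lvsl (0, p);        -- v16qi_ftype_long_pcvoid
     __builtin_altivec_stvlx (v, 0, p);    -- void_ftype_v16qi_long_pvoid

   The overloaded __builtin_vec_* entries are declared with the opaque
   types so the front end can resolve them to a typed variant later.  */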
17724 if (TARGET_P9_VECTOR)
17726 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17727 P9V_BUILTIN_STXVL);
17728 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17729 P9V_BUILTIN_XST_LEN_R);
17732 /* Add the DST variants. */
17733 d = bdesc_dst;
17734 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17736 HOST_WIDE_INT mask = d->mask;
17738 /* It is expected that these dst built-in functions may have
17739 d->icode equal to CODE_FOR_nothing. */
17740 if ((mask & builtin_mask) != mask)
17742 if (TARGET_DEBUG_BUILTIN)
17743 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17744 d->name);
17745 continue;
17747 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17750 /* Initialize the predicates. */
17751 d = bdesc_altivec_preds;
17752 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17754 machine_mode mode1;
17755 tree type;
17756 HOST_WIDE_INT mask = d->mask;
17758 if ((mask & builtin_mask) != mask)
17760 if (TARGET_DEBUG_BUILTIN)
17761 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17762 d->name);
17763 continue;
17766 if (rs6000_overloaded_builtin_p (d->code))
17767 mode1 = VOIDmode;
17768 else
17770 /* Cannot define builtin if the instruction is disabled. */
17771 gcc_assert (d->icode != CODE_FOR_nothing);
17772 mode1 = insn_data[d->icode].operand[1].mode;
17775 switch (mode1)
17777 case E_VOIDmode:
17778 type = int_ftype_int_opaque_opaque;
17779 break;
17780 case E_V2DImode:
17781 type = int_ftype_int_v2di_v2di;
17782 break;
17783 case E_V4SImode:
17784 type = int_ftype_int_v4si_v4si;
17785 break;
17786 case E_V8HImode:
17787 type = int_ftype_int_v8hi_v8hi;
17788 break;
17789 case E_V16QImode:
17790 type = int_ftype_int_v16qi_v16qi;
17791 break;
17792 case E_V4SFmode:
17793 type = int_ftype_int_v4sf_v4sf;
17794 break;
17795 case E_V2DFmode:
17796 type = int_ftype_int_v2df_v2df;
17797 break;
17798 default:
17799 gcc_unreachable ();
17802 def_builtin (d->name, type, d->code);
17805 /* Initialize the abs* operators. */
17806 d = bdesc_abs;
17807 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17809 machine_mode mode0;
17810 tree type;
17811 HOST_WIDE_INT mask = d->mask;
17813 if ((mask & builtin_mask) != mask)
17815 if (TARGET_DEBUG_BUILTIN)
17816 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17817 d->name);
17818 continue;
17821 /* Cannot define builtin if the instruction is disabled. */
17822 gcc_assert (d->icode != CODE_FOR_nothing);
17823 mode0 = insn_data[d->icode].operand[0].mode;
17825 switch (mode0)
17827 case E_V2DImode:
17828 type = v2di_ftype_v2di;
17829 break;
17830 case E_V4SImode:
17831 type = v4si_ftype_v4si;
17832 break;
17833 case E_V8HImode:
17834 type = v8hi_ftype_v8hi;
17835 break;
17836 case E_V16QImode:
17837 type = v16qi_ftype_v16qi;
17838 break;
17839 case E_V4SFmode:
17840 type = v4sf_ftype_v4sf;
17841 break;
17842 case E_V2DFmode:
17843 type = v2df_ftype_v2df;
17844 break;
17845 default:
17846 gcc_unreachable ();
17849 def_builtin (d->name, type, d->code);
17852 /* Initialize target builtin that implements
17853 targetm.vectorize.builtin_mask_for_load. */
17855 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17856 v16qi_ftype_long_pcvoid,
17857 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17858 BUILT_IN_MD, NULL, NULL_TREE);
17859 TREE_READONLY (decl) = 1;
17860 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17861 altivec_builtin_mask_for_load = decl;
17863 /* Access to the vec_init patterns. */
17864 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17865 integer_type_node, integer_type_node,
17866 integer_type_node, NULL_TREE);
17867 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17869 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17870 short_integer_type_node,
17871 short_integer_type_node,
17872 short_integer_type_node,
17873 short_integer_type_node,
17874 short_integer_type_node,
17875 short_integer_type_node,
17876 short_integer_type_node, NULL_TREE);
17877 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17879 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17880 char_type_node, char_type_node,
17881 char_type_node, char_type_node,
17882 char_type_node, char_type_node,
17883 char_type_node, char_type_node,
17884 char_type_node, char_type_node,
17885 char_type_node, char_type_node,
17886 char_type_node, char_type_node,
17887 char_type_node, NULL_TREE);
17888 def_builtin ("__builtin_vec_init_v16qi", ftype,
17889 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17891 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17892 float_type_node, float_type_node,
17893 float_type_node, NULL_TREE);
17894 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17896 /* VSX builtins. */
17897 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17898 double_type_node, NULL_TREE);
17899 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17901 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17902 intDI_type_node, NULL_TREE);
17903 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17905 /* Access to the vec_set patterns. */
17906 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17907 intSI_type_node,
17908 integer_type_node, NULL_TREE);
17909 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17911 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17912 intHI_type_node,
17913 integer_type_node, NULL_TREE);
17914 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17916 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17917 intQI_type_node,
17918 integer_type_node, NULL_TREE);
17919 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17921 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17922 float_type_node,
17923 integer_type_node, NULL_TREE);
17924 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17926 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17927 double_type_node,
17928 integer_type_node, NULL_TREE);
17929 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17931 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17932 intDI_type_node,
17933 integer_type_node, NULL_TREE);
17934 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17936 /* Access to the vec_extract patterns. */
17937 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17938 integer_type_node, NULL_TREE);
17939 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17941 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17942 integer_type_node, NULL_TREE);
17943 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17945 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17946 integer_type_node, NULL_TREE);
17947 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17949 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17950 integer_type_node, NULL_TREE);
17951 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17953 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17954 integer_type_node, NULL_TREE);
17955 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17957 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17958 integer_type_node, NULL_TREE);
17959 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
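/* Worked example (illustrative, not from the original source): with the
   signatures above, generic vector element operations need nothing more
   than these builtin codes:

     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
     v = __builtin_vec_set_v4si (v, 42, 0);     -- insert 42 at element 0
     int x = __builtin_vec_ext_v4si (v, 3);     -- extract element 3     */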
17962 if (V1TI_type_node)
17964 tree v1ti_ftype_long_pcvoid
17965 = build_function_type_list (V1TI_type_node,
17966 long_integer_type_node, pcvoid_type_node,
17967 NULL_TREE);
17968 tree void_ftype_v1ti_long_pvoid
17969 = build_function_type_list (void_type_node,
17970 V1TI_type_node, long_integer_type_node,
17971 pvoid_type_node, NULL_TREE);
17972 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17973 VSX_BUILTIN_LD_ELEMREV_V1TI);
17974 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17975 VSX_BUILTIN_LXVD2X_V1TI);
17976 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17977 VSX_BUILTIN_STXVD2X_V1TI);
17978 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17979 NULL_TREE, NULL_TREE);
17980 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17981 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17982 intTI_type_node,
17983 integer_type_node, NULL_TREE);
17984 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17985 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17986 integer_type_node, NULL_TREE);
17987 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17992 static void
17993 htm_init_builtins (void)
17995 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17996 const struct builtin_description *d;
17997 size_t i;
17999 d = bdesc_htm;
18000 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
18002 tree op[MAX_HTM_OPERANDS], type;
18003 HOST_WIDE_INT mask = d->mask;
18004 unsigned attr = rs6000_builtin_info[d->code].attr;
18005 bool void_func = (attr & RS6000_BTC_VOID);
18006 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
18007 int nopnds = 0;
18008 tree gpr_type_node;
18009 tree rettype;
18010 tree argtype;
18012 /* It is expected that these htm built-in functions may have
18013 d->icode equal to CODE_FOR_nothing. */
18015 if (TARGET_32BIT && TARGET_POWERPC64)
18016 gpr_type_node = long_long_unsigned_type_node;
18017 else
18018 gpr_type_node = long_unsigned_type_node;
18020 if (attr & RS6000_BTC_SPR)
18022 rettype = gpr_type_node;
18023 argtype = gpr_type_node;
18025 else if (d->code == HTM_BUILTIN_TABORTDC
18026 || d->code == HTM_BUILTIN_TABORTDCI)
18028 rettype = unsigned_type_node;
18029 argtype = gpr_type_node;
18031 else
18033 rettype = unsigned_type_node;
18034 argtype = unsigned_type_node;
18037 if ((mask & builtin_mask) != mask)
18039 if (TARGET_DEBUG_BUILTIN)
18040 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
18041 continue;
18044 if (d->name == 0)
18046 if (TARGET_DEBUG_BUILTIN)
18047 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
18048 (long unsigned) i);
18049 continue;
18052 op[nopnds++] = (void_func) ? void_type_node : rettype;
18054 if (attr_args == RS6000_BTC_UNARY)
18055 op[nopnds++] = argtype;
18056 else if (attr_args == RS6000_BTC_BINARY)
18058 op[nopnds++] = argtype;
18059 op[nopnds++] = argtype;
18061 else if (attr_args == RS6000_BTC_TERNARY)
18063 op[nopnds++] = argtype;
18064 op[nopnds++] = argtype;
18065 op[nopnds++] = argtype;
18068 switch (nopnds)
18070 case 1:
18071 type = build_function_type_list (op[0], NULL_TREE);
18072 break;
18073 case 2:
18074 type = build_function_type_list (op[0], op[1], NULL_TREE);
18075 break;
18076 case 3:
18077 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
18078 break;
18079 case 4:
18080 type = build_function_type_list (op[0], op[1], op[2], op[3],
18081 NULL_TREE);
18082 break;
18083 default:
18084 gcc_unreachable ();
18087 def_builtin (d->name, type, d->code);
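/* Worked example: for a bdesc_htm entry whose attributes have
   RS6000_BTC_SPR and RS6000_BTC_BINARY set on a 64-bit target, the loop
   above picks rettype = argtype = long_unsigned_type_node, fills
   op[] = { ret, arg, arg } (nopnds == 3), and the switch builds
   unsigned long (*) (unsigned long, unsigned long); with RS6000_BTC_VOID
   also set, the return slot becomes void instead.  */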
18091 /* Hash function for builtin functions with up to 3 arguments and a return
18092 type. */
18093 hashval_t
18094 builtin_hasher::hash (builtin_hash_struct *bh)
18096 unsigned ret = 0;
18097 int i;
18099 for (i = 0; i < 4; i++)
18101 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
18102 ret = (ret * 2) + bh->uns_p[i];
18105 return ret;
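/* Equivalently, each (mode, uns_p) pair is folded in as a digit of a
   mixed-radix number, ret = (ret * MAX_MACHINE_MODE + mode) * 2 + uns_p,
   so two distinct signatures can only collide when that product
   overflows and wraps.  */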
18108 /* Compare builtin hash entries H1 and H2 for equivalence. */
18109 bool
18110 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
18112 return ((p1->mode[0] == p2->mode[0])
18113 && (p1->mode[1] == p2->mode[1])
18114 && (p1->mode[2] == p2->mode[2])
18115 && (p1->mode[3] == p2->mode[3])
18116 && (p1->uns_p[0] == p2->uns_p[0])
18117 && (p1->uns_p[1] == p2->uns_p[1])
18118 && (p1->uns_p[2] == p2->uns_p[2])
18119 && (p1->uns_p[3] == p2->uns_p[3]));
18122 /* Map types for builtin functions with an explicit return type and up to 3
18123 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
18124 of each unused argument. */
18125 static tree
18126 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18127 machine_mode mode_arg1, machine_mode mode_arg2,
18128 enum rs6000_builtins builtin, const char *name)
18130 struct builtin_hash_struct h;
18131 struct builtin_hash_struct *h2;
18132 int num_args = 3;
18133 int i;
18134 tree ret_type = NULL_TREE;
18135 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18137 /* Create builtin_hash_table. */
18138 if (builtin_hash_table == NULL)
18139 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18141 h.type = NULL_TREE;
18142 h.mode[0] = mode_ret;
18143 h.mode[1] = mode_arg0;
18144 h.mode[2] = mode_arg1;
18145 h.mode[3] = mode_arg2;
18146 h.uns_p[0] = 0;
18147 h.uns_p[1] = 0;
18148 h.uns_p[2] = 0;
18149 h.uns_p[3] = 0;
18151 /* If the builtin produces unsigned results or takes unsigned arguments,
18152 and it can be returned as a decl for the vectorizer (such as the
18153 widening multiplies and permutes), make sure the arguments and return
18154 value are correctly typed. */
18155 switch (builtin)
18157 /* unsigned 1 argument functions. */
18158 case CRYPTO_BUILTIN_VSBOX:
18159 case P8V_BUILTIN_VGBBD:
18160 case MISC_BUILTIN_CDTBCD:
18161 case MISC_BUILTIN_CBCDTD:
18162 h.uns_p[0] = 1;
18163 h.uns_p[1] = 1;
18164 break;
18166 /* unsigned 2 argument functions. */
18167 case ALTIVEC_BUILTIN_VMULEUB:
18168 case ALTIVEC_BUILTIN_VMULEUH:
18169 case P8V_BUILTIN_VMULEUW:
18170 case ALTIVEC_BUILTIN_VMULOUB:
18171 case ALTIVEC_BUILTIN_VMULOUH:
18172 case P8V_BUILTIN_VMULOUW:
18173 case CRYPTO_BUILTIN_VCIPHER:
18174 case CRYPTO_BUILTIN_VCIPHERLAST:
18175 case CRYPTO_BUILTIN_VNCIPHER:
18176 case CRYPTO_BUILTIN_VNCIPHERLAST:
18177 case CRYPTO_BUILTIN_VPMSUMB:
18178 case CRYPTO_BUILTIN_VPMSUMH:
18179 case CRYPTO_BUILTIN_VPMSUMW:
18180 case CRYPTO_BUILTIN_VPMSUMD:
18181 case CRYPTO_BUILTIN_VPMSUM:
18182 case MISC_BUILTIN_ADDG6S:
18183 case MISC_BUILTIN_DIVWEU:
18184 case MISC_BUILTIN_DIVWEUO:
18185 case MISC_BUILTIN_DIVDEU:
18186 case MISC_BUILTIN_DIVDEUO:
18187 case VSX_BUILTIN_UDIV_V2DI:
18188 case ALTIVEC_BUILTIN_VMAXUB:
18189 case ALTIVEC_BUILTIN_VMINUB:
18190 case ALTIVEC_BUILTIN_VMAXUH:
18191 case ALTIVEC_BUILTIN_VMINUH:
18192 case ALTIVEC_BUILTIN_VMAXUW:
18193 case ALTIVEC_BUILTIN_VMINUW:
18194 case P8V_BUILTIN_VMAXUD:
18195 case P8V_BUILTIN_VMINUD:
18196 h.uns_p[0] = 1;
18197 h.uns_p[1] = 1;
18198 h.uns_p[2] = 1;
18199 break;
18201 /* unsigned 3 argument functions. */
18202 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18203 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18204 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18205 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18206 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18207 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18208 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18209 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18210 case VSX_BUILTIN_VPERM_16QI_UNS:
18211 case VSX_BUILTIN_VPERM_8HI_UNS:
18212 case VSX_BUILTIN_VPERM_4SI_UNS:
18213 case VSX_BUILTIN_VPERM_2DI_UNS:
18214 case VSX_BUILTIN_XXSEL_16QI_UNS:
18215 case VSX_BUILTIN_XXSEL_8HI_UNS:
18216 case VSX_BUILTIN_XXSEL_4SI_UNS:
18217 case VSX_BUILTIN_XXSEL_2DI_UNS:
18218 case CRYPTO_BUILTIN_VPERMXOR:
18219 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18220 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18221 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18222 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18223 case CRYPTO_BUILTIN_VSHASIGMAW:
18224 case CRYPTO_BUILTIN_VSHASIGMAD:
18225 case CRYPTO_BUILTIN_VSHASIGMA:
18226 h.uns_p[0] = 1;
18227 h.uns_p[1] = 1;
18228 h.uns_p[2] = 1;
18229 h.uns_p[3] = 1;
18230 break;
18232 /* signed permute functions with unsigned char mask. */
18233 case ALTIVEC_BUILTIN_VPERM_16QI:
18234 case ALTIVEC_BUILTIN_VPERM_8HI:
18235 case ALTIVEC_BUILTIN_VPERM_4SI:
18236 case ALTIVEC_BUILTIN_VPERM_4SF:
18237 case ALTIVEC_BUILTIN_VPERM_2DI:
18238 case ALTIVEC_BUILTIN_VPERM_2DF:
18239 case VSX_BUILTIN_VPERM_16QI:
18240 case VSX_BUILTIN_VPERM_8HI:
18241 case VSX_BUILTIN_VPERM_4SI:
18242 case VSX_BUILTIN_VPERM_4SF:
18243 case VSX_BUILTIN_VPERM_2DI:
18244 case VSX_BUILTIN_VPERM_2DF:
18245 h.uns_p[3] = 1;
18246 break;
18248 /* unsigned args, signed return. */
18249 case VSX_BUILTIN_XVCVUXDSP:
18250 case VSX_BUILTIN_XVCVUXDDP_UNS:
18251 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18252 h.uns_p[1] = 1;
18253 break;
18255 /* signed args, unsigned return. */
18256 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18257 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18258 case MISC_BUILTIN_UNPACK_TD:
18259 case MISC_BUILTIN_UNPACK_V1TI:
18260 h.uns_p[0] = 1;
18261 break;
18263 /* unsigned arguments, bool return (compares). */
18264 case ALTIVEC_BUILTIN_VCMPEQUB:
18265 case ALTIVEC_BUILTIN_VCMPEQUH:
18266 case ALTIVEC_BUILTIN_VCMPEQUW:
18267 case P8V_BUILTIN_VCMPEQUD:
18268 case VSX_BUILTIN_CMPGE_U16QI:
18269 case VSX_BUILTIN_CMPGE_U8HI:
18270 case VSX_BUILTIN_CMPGE_U4SI:
18271 case VSX_BUILTIN_CMPGE_U2DI:
18272 case ALTIVEC_BUILTIN_VCMPGTUB:
18273 case ALTIVEC_BUILTIN_VCMPGTUH:
18274 case ALTIVEC_BUILTIN_VCMPGTUW:
18275 case P8V_BUILTIN_VCMPGTUD:
18276 h.uns_p[1] = 1;
18277 h.uns_p[2] = 1;
18278 break;
18280 /* unsigned arguments for 128-bit pack instructions. */
18281 case MISC_BUILTIN_PACK_TD:
18282 case MISC_BUILTIN_PACK_V1TI:
18283 h.uns_p[1] = 1;
18284 h.uns_p[2] = 1;
18285 break;
18287 /* unsigned second arguments (vector shift right). */
18288 case ALTIVEC_BUILTIN_VSRB:
18289 case ALTIVEC_BUILTIN_VSRH:
18290 case ALTIVEC_BUILTIN_VSRW:
18291 case P8V_BUILTIN_VSRD:
18292 h.uns_p[2] = 1;
18293 break;
18295 default:
18296 break;
18299 /* Figure out how many args are present. */
18300 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18301 num_args--;
18303 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18304 if (!ret_type && h.uns_p[0])
18305 ret_type = builtin_mode_to_type[h.mode[0]][0];
18307 if (!ret_type)
18308 fatal_error (input_location,
18309 "internal error: builtin function %qs had an unexpected "
18310 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18312 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18313 arg_type[i] = NULL_TREE;
18315 for (i = 0; i < num_args; i++)
18317 int m = (int) h.mode[i+1];
18318 int uns_p = h.uns_p[i+1];
18320 arg_type[i] = builtin_mode_to_type[m][uns_p];
18321 if (!arg_type[i] && uns_p)
18322 arg_type[i] = builtin_mode_to_type[m][0];
18324 if (!arg_type[i])
18325 fatal_error (input_location,
18326 "internal error: builtin function %qs, argument %d "
18327 "had unexpected argument type %qs", name, i,
18328 GET_MODE_NAME (m));
18331 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18332 if (*found == NULL)
18334 h2 = ggc_alloc<builtin_hash_struct> ();
18335 *h2 = h;
18336 *found = h2;
18338 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18339 arg_type[2], NULL_TREE);
18342 return (*found)->type;
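/* Worked example: ALTIVEC_BUILTIN_VMULEUB is listed above among the
   unsigned 2-argument functions, so h.uns_p[0..2] = 1.  Called with
   mode_ret = V8HImode, mode_arg0 = mode_arg1 = V16QImode and
   mode_arg2 = VOIDmode (so num_args drops to 2), the cached type is
   vector unsigned short (*) (vector unsigned char, vector unsigned char).  */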
18345 static void
18346 rs6000_common_init_builtins (void)
18348 const struct builtin_description *d;
18349 size_t i;
18351 tree opaque_ftype_opaque = NULL_TREE;
18352 tree opaque_ftype_opaque_opaque = NULL_TREE;
18353 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18354 tree v2si_ftype = NULL_TREE;
18355 tree v2si_ftype_qi = NULL_TREE;
18356 tree v2si_ftype_v2si_qi = NULL_TREE;
18357 tree v2si_ftype_int_qi = NULL_TREE;
18358 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18360 if (!TARGET_PAIRED_FLOAT)
18362 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18363 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18366 /* Paired builtins are only available if the compiler was built with the
18367 appropriate options, so only create those builtins in that case.
18368 Create Altivec and VSX builtins on machines with at least the
18369 general purpose extensions (970 and newer) to allow the use of
18370 the target attribute. */
18372 if (TARGET_EXTRA_BUILTINS)
18373 builtin_mask |= RS6000_BTM_COMMON;
18375 /* Add the ternary operators. */
18376 d = bdesc_3arg;
18377 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18379 tree type;
18380 HOST_WIDE_INT mask = d->mask;
18382 if ((mask & builtin_mask) != mask)
18384 if (TARGET_DEBUG_BUILTIN)
18385 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18386 continue;
18389 if (rs6000_overloaded_builtin_p (d->code))
18391 if (! (type = opaque_ftype_opaque_opaque_opaque))
18392 type = opaque_ftype_opaque_opaque_opaque
18393 = build_function_type_list (opaque_V4SI_type_node,
18394 opaque_V4SI_type_node,
18395 opaque_V4SI_type_node,
18396 opaque_V4SI_type_node,
18397 NULL_TREE);
18399 else
18401 enum insn_code icode = d->icode;
18402 if (d->name == 0)
18404 if (TARGET_DEBUG_BUILTIN)
18405 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18406 (long unsigned)i);
18408 continue;
18411 if (icode == CODE_FOR_nothing)
18413 if (TARGET_DEBUG_BUILTIN)
18414 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18415 d->name);
18417 continue;
18420 type = builtin_function_type (insn_data[icode].operand[0].mode,
18421 insn_data[icode].operand[1].mode,
18422 insn_data[icode].operand[2].mode,
18423 insn_data[icode].operand[3].mode,
18424 d->code, d->name);
18427 def_builtin (d->name, type, d->code);
18430 /* Add the binary operators. */
18431 d = bdesc_2arg;
18432 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18434 machine_mode mode0, mode1, mode2;
18435 tree type;
18436 HOST_WIDE_INT mask = d->mask;
18438 if ((mask & builtin_mask) != mask)
18440 if (TARGET_DEBUG_BUILTIN)
18441 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18442 continue;
18445 if (rs6000_overloaded_builtin_p (d->code))
18447 if (! (type = opaque_ftype_opaque_opaque))
18448 type = opaque_ftype_opaque_opaque
18449 = build_function_type_list (opaque_V4SI_type_node,
18450 opaque_V4SI_type_node,
18451 opaque_V4SI_type_node,
18452 NULL_TREE);
18454 else
18456 enum insn_code icode = d->icode;
18457 if (d->name == 0)
18459 if (TARGET_DEBUG_BUILTIN)
18460 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18461 (long unsigned)i);
18463 continue;
18466 if (icode == CODE_FOR_nothing)
18468 if (TARGET_DEBUG_BUILTIN)
18469 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18470 d->name);
18472 continue;
18475 mode0 = insn_data[icode].operand[0].mode;
18476 mode1 = insn_data[icode].operand[1].mode;
18477 mode2 = insn_data[icode].operand[2].mode;
18479 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18481 if (! (type = v2si_ftype_v2si_qi))
18482 type = v2si_ftype_v2si_qi
18483 = build_function_type_list (opaque_V2SI_type_node,
18484 opaque_V2SI_type_node,
18485 char_type_node,
18486 NULL_TREE);
18489 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18490 && mode2 == QImode)
18492 if (! (type = v2si_ftype_int_qi))
18493 type = v2si_ftype_int_qi
18494 = build_function_type_list (opaque_V2SI_type_node,
18495 integer_type_node,
18496 char_type_node,
18497 NULL_TREE);
18500 else
18501 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18502 d->code, d->name);
18505 def_builtin (d->name, type, d->code);
18508 /* Add the simple unary operators. */
18509 d = bdesc_1arg;
18510 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18512 machine_mode mode0, mode1;
18513 tree type;
18514 HOST_WIDE_INT mask = d->mask;
18516 if ((mask & builtin_mask) != mask)
18518 if (TARGET_DEBUG_BUILTIN)
18519 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18520 continue;
18523 if (rs6000_overloaded_builtin_p (d->code))
18525 if (! (type = opaque_ftype_opaque))
18526 type = opaque_ftype_opaque
18527 = build_function_type_list (opaque_V4SI_type_node,
18528 opaque_V4SI_type_node,
18529 NULL_TREE);
18531 else
18533 enum insn_code icode = d->icode;
18534 if (d->name == 0)
18536 if (TARGET_DEBUG_BUILTIN)
18537 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18538 (long unsigned)i);
18540 continue;
18543 if (icode == CODE_FOR_nothing)
18545 if (TARGET_DEBUG_BUILTIN)
18546 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18547 d->name);
18549 continue;
18552 mode0 = insn_data[icode].operand[0].mode;
18553 mode1 = insn_data[icode].operand[1].mode;
18555 if (mode0 == V2SImode && mode1 == QImode)
18557 if (! (type = v2si_ftype_qi))
18558 type = v2si_ftype_qi
18559 = build_function_type_list (opaque_V2SI_type_node,
18560 char_type_node,
18561 NULL_TREE);
18564 else
18565 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18566 d->code, d->name);
18569 def_builtin (d->name, type, d->code);
18572 /* Add the simple no-argument operators. */
18573 d = bdesc_0arg;
18574 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18576 machine_mode mode0;
18577 tree type;
18578 HOST_WIDE_INT mask = d->mask;
18580 if ((mask & builtin_mask) != mask)
18582 if (TARGET_DEBUG_BUILTIN)
18583 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18584 continue;
18586 if (rs6000_overloaded_builtin_p (d->code))
18588 if (!opaque_ftype_opaque)
18589 opaque_ftype_opaque
18590 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18591 type = opaque_ftype_opaque;
18593 else
18595 enum insn_code icode = d->icode;
18596 if (d->name == 0)
18598 if (TARGET_DEBUG_BUILTIN)
18599 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18600 (long unsigned) i);
18601 continue;
18603 if (icode == CODE_FOR_nothing)
18605 if (TARGET_DEBUG_BUILTIN)
18606 fprintf (stderr,
18607 "rs6000_builtin, skip no-argument %s (no code)\n",
18608 d->name);
18609 continue;
18611 mode0 = insn_data[icode].operand[0].mode;
18612 if (mode0 == V2SImode)
18614 /* Code for paired single.  */
18615 if (! (type = v2si_ftype))
18617 v2si_ftype
18618 = build_function_type_list (opaque_V2SI_type_node,
18619 NULL_TREE);
18620 type = v2si_ftype;
18623 else
18624 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18625 d->code, d->name);
18627 def_builtin (d->name, type, d->code);
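/* For reference: the bdesc_0arg/1arg/2arg/3arg tables walked above are
   generated from rs6000-builtin.def; each builtin_description entry
   carries the feature mask checked against builtin_mask, the insn_code,
   the builtin name, and the rs6000_builtins enumerator -- everything the
   loops above consume.  */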
18631 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18632 static void
18633 init_float128_ibm (machine_mode mode)
18635 if (!TARGET_XL_COMPAT)
18637 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18638 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18639 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18640 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18642 if (!TARGET_HARD_FLOAT)
18644 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18645 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18646 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18647 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18648 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18649 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18650 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18651 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18653 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18654 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18655 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18656 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18657 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18658 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18659 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18660 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18663 else
18665 set_optab_libfunc (add_optab, mode, "_xlqadd");
18666 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18667 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18668 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18671 /* Add various conversions for IFmode to use the traditional TFmode
18672 names. */
18673 if (mode == IFmode)
18675 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18676 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18677 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18678 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18679 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18680 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18682 if (TARGET_POWERPC64)
18684 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18685 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18686 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18687 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
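/* For reference (an assumption about the runtime, not stated here): the
   __gcc_q* names resolve to the IBM double-double support routines that
   libgcc builds from config/rs6000/ibm-ldouble.c, while the _xlq* names
   match the routines shipped with IBM's XL compiler runtime, hence the
   TARGET_XL_COMPAT split above.  */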
18692 /* Set up IEEE 128-bit floating point routines. Use different names if the
18693 arguments can be passed in a vector register. The historical PowerPC
18694 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18695 continue to use that if we aren't using vector registers to pass IEEE
18696 128-bit floating point. */
18698 static void
18699 init_float128_ieee (machine_mode mode)
18701 if (FLOAT128_VECTOR_P (mode))
18703 set_optab_libfunc (add_optab, mode, "__addkf3");
18704 set_optab_libfunc (sub_optab, mode, "__subkf3");
18705 set_optab_libfunc (neg_optab, mode, "__negkf2");
18706 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18707 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18708 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18709 set_optab_libfunc (abs_optab, mode, "__abskf2");
18711 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18712 set_optab_libfunc (ne_optab, mode, "__nekf2");
18713 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18714 set_optab_libfunc (ge_optab, mode, "__gekf2");
18715 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18716 set_optab_libfunc (le_optab, mode, "__lekf2");
18717 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18719 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18720 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18721 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18722 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18724 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18725 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18726 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18728 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18729 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18730 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18732 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18733 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18734 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18735 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18736 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18737 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18739 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18740 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18741 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18742 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18744 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18745 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18746 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18747 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18749 if (TARGET_POWERPC64)
18751 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18752 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18753 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18754 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18758 else
18760 set_optab_libfunc (add_optab, mode, "_q_add");
18761 set_optab_libfunc (sub_optab, mode, "_q_sub");
18762 set_optab_libfunc (neg_optab, mode, "_q_neg");
18763 set_optab_libfunc (smul_optab, mode, "_q_mul");
18764 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18765 if (TARGET_PPC_GPOPT)
18766 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18768 set_optab_libfunc (eq_optab, mode, "_q_feq");
18769 set_optab_libfunc (ne_optab, mode, "_q_fne");
18770 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18771 set_optab_libfunc (ge_optab, mode, "_q_fge");
18772 set_optab_libfunc (lt_optab, mode, "_q_flt");
18773 set_optab_libfunc (le_optab, mode, "_q_fle");
18775 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18776 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18777 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18778 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18779 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18780 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18781 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18782 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18786 static void
18787 rs6000_init_libfuncs (void)
18789 /* __float128 support. */
18790 if (TARGET_FLOAT128_TYPE)
18792 init_float128_ibm (IFmode);
18793 init_float128_ieee (KFmode);
18796 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18797 if (TARGET_LONG_DOUBLE_128)
18799 if (!TARGET_IEEEQUAD)
18800 init_float128_ibm (TFmode);
18802 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18803 else
18804 init_float128_ieee (TFmode);
18808 /* Emit a potentially record-form instruction, setting DST from SRC.
18809 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18810 signed comparison of DST with zero. If DOT is 1, the generated RTL
18811 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18812 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18813 a separate COMPARE. */
18815 void
18816 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18818 if (dot == 0)
18820 emit_move_insn (dst, src);
18821 return;
18824 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18826 emit_move_insn (dst, src);
18827 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18828 return;
18831 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18832 if (dot == 1)
18834 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18835 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18837 else
18839 rtx set = gen_rtx_SET (dst, src);
18840 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
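/* For example, with dot == 1 the emitted RTL has the shape

     (parallel [(set ccreg (compare:CC src (const_int 0)))
                (clobber dst)])

   which matches the record-form ("dot") patterns, while dot == 2 keeps
   (set dst src) in the PARALLEL because the caller still uses DST.  */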
18845 /* A validation routine: say whether CODE, a condition code, and MODE
18846 match. The other alternatives either don't make sense or should
18847 never be generated. */
18849 void
18850 validate_condition_mode (enum rtx_code code, machine_mode mode)
18852 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18853 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18854 && GET_MODE_CLASS (mode) == MODE_CC);
18856 /* These don't make sense. */
18857 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18858 || mode != CCUNSmode);
18860 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18861 || mode == CCUNSmode);
18863 gcc_assert (mode == CCFPmode
18864 || (code != ORDERED && code != UNORDERED
18865 && code != UNEQ && code != LTGT
18866 && code != UNGT && code != UNLT
18867 && code != UNGE && code != UNLE));
18869 /* These should never be generated except for
18870 flag_finite_math_only. */
18871 gcc_assert (mode != CCFPmode
18872 || flag_finite_math_only
18873 || (code != LE && code != GE
18874 && code != UNEQ && code != LTGT
18875 && code != UNGT && code != UNLT));
18877 /* These are invalid; the information is not there. */
18878 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18882 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18883 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18884 not zero, store there the bit offset (counted from the right) where
18885 the single stretch of 1 bits begins; and similarly for B, the bit
18886 offset where it ends. */
18888 bool
18889 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18891 unsigned HOST_WIDE_INT val = INTVAL (mask);
18892 unsigned HOST_WIDE_INT bit;
18893 int nb, ne;
18894 int n = GET_MODE_PRECISION (mode);
18896 if (mode != DImode && mode != SImode)
18897 return false;
18899 if (INTVAL (mask) >= 0)
18901 bit = val & -val;
18902 ne = exact_log2 (bit);
18903 nb = exact_log2 (val + bit);
18905 else if (val + 1 == 0)
18907 nb = n;
18908 ne = 0;
18910 else if (val & 1)
18912 val = ~val;
18913 bit = val & -val;
18914 nb = exact_log2 (bit);
18915 ne = exact_log2 (val + bit);
18917 else
18919 bit = val & -val;
18920 ne = exact_log2 (bit);
18921 if (val + bit == 0)
18922 nb = n;
18923 else
18924 nb = 0;
18927 nb--;
18929 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18930 return false;
18932 if (b)
18933 *b = nb;
18934 if (e)
18935 *e = ne;
18937 return true;
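/* Worked examples: MASK = 0x0ff0 in SImode takes the non-negative branch:
   bit = 0x10, ne = 4, nb = exact_log2 (0x1000) - 1 = 11, so the run of
   ones begins at bit 4 and ends at bit 11.  The wrap-around SImode mask
   0xf000000f takes the (val & 1) branch: ~val = 0x0ffffff0 gives ne = 28
   and nb = 3.  A two-run constant such as 0x0f0f makes exact_log2 return
   -1, so the bounds check fails and the function returns false.  */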
18940 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18941 or rldicr instruction, to implement an AND with it in mode MODE. */
18943 bool
18944 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18946 int nb, ne;
18948 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18949 return false;
18951 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18952 does not wrap. */
18953 if (mode == DImode)
18954 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18956 /* For SImode, rlwinm can do everything. */
18957 if (mode == SImode)
18958 return (nb < 32 && ne < 32);
18960 return false;
18963 /* Return the instruction template for an AND with mask in mode MODE, with
18964 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18966 const char *
18967 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18969 int nb, ne;
18971 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18972 gcc_unreachable ();
18974 if (mode == DImode && ne == 0)
18976 operands[3] = GEN_INT (63 - nb);
18977 if (dot)
18978 return "rldicl. %0,%1,0,%3";
18979 return "rldicl %0,%1,0,%3";
18982 if (mode == DImode && nb == 63)
18984 operands[3] = GEN_INT (63 - ne);
18985 if (dot)
18986 return "rldicr. %0,%1,0,%3";
18987 return "rldicr %0,%1,0,%3";
18990 if (nb < 32 && ne < 32)
18992 operands[3] = GEN_INT (31 - nb);
18993 operands[4] = GEN_INT (31 - ne);
18994 if (dot)
18995 return "rlwinm. %0,%1,0,%3,%4";
18996 return "rlwinm %0,%1,0,%3,%4";
18999 gcc_unreachable ();
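/* For example, an AND with 0xff in DImode has ne == 0 and nb == 7, so the
   first case applies: operands[3] = 56 and the template prints as
   "rldicl %0,%1,0,56", i.e. rotate by zero and clear all bits to the
   left of bit 7.  */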
19002 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
19003 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
19004 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
19006 bool
19007 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
19009 int nb, ne;
19011 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
19012 return false;
19014 int n = GET_MODE_PRECISION (mode);
19015 int sh = -1;
19017 if (CONST_INT_P (XEXP (shift, 1)))
19019 sh = INTVAL (XEXP (shift, 1));
19020 if (sh < 0 || sh >= n)
19021 return false;
19024 rtx_code code = GET_CODE (shift);
19026 /* Convert any shift by 0 to a rotate, to simplify the code below. */
19027 if (sh == 0)
19028 code = ROTATE;
19030 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19031 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
19032 code = ASHIFT;
19033 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
19035 code = LSHIFTRT;
19036 sh = n - sh;
19039 /* DImode rotates need rld*. */
19040 if (mode == DImode && code == ROTATE)
19041 return (nb == 63 || ne == 0 || ne == sh);
19043 /* SImode rotates need rlw*. */
19044 if (mode == SImode && code == ROTATE)
19045 return (nb < 32 && ne < 32 && sh < 32);
19047 /* Wrap-around masks are only okay for rotates. */
19048 if (ne > nb)
19049 return false;
19051 /* Variable shifts are only okay for rotates. */
19052 if (sh < 0)
19053 return false;
19055 /* Don't allow ASHIFT if the mask is wrong for that. */
19056 if (code == ASHIFT && ne < sh)
19057 return false;
19059 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
19060 if the mask is wrong for that. */
19061 if (nb < 32 && ne < 32 && sh < 32
19062 && !(code == LSHIFTRT && nb >= 32 - sh))
19063 return true;
19065 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
19066 if the mask is wrong for that. */
19067 if (code == LSHIFTRT)
19068 sh = 64 - sh;
19069 if (nb == 63 || ne == 0 || ne == sh)
19070 return !(code == LSHIFTRT && nb >= sh);
19072 return false;
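/* For example, (ashift:SI x 8) with mask 0xffffff00 yields nb = 31 and
   ne = 8; code is ASHIFT with sh == 8, ne >= sh, and all of nb, ne, sh
   are below 32, so the rlw* test above accepts it (an rlwinm with shift 8
   and mask bits 0..23 in big-endian numbering).  */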
19075 /* Return the instruction template for a shift with mask in mode MODE, with
19076 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19078 const char *
19079 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
19081 int nb, ne;
19083 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19084 gcc_unreachable ();
19086 if (mode == DImode && ne == 0)
19088 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19089 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
19090 operands[3] = GEN_INT (63 - nb);
19091 if (dot)
19092 return "rld%I2cl. %0,%1,%2,%3";
19093 return "rld%I2cl %0,%1,%2,%3";
19096 if (mode == DImode && nb == 63)
19098 operands[3] = GEN_INT (63 - ne);
19099 if (dot)
19100 return "rld%I2cr. %0,%1,%2,%3";
19101 return "rld%I2cr %0,%1,%2,%3";
19104 if (mode == DImode
19105 && GET_CODE (operands[4]) != LSHIFTRT
19106 && CONST_INT_P (operands[2])
19107 && ne == INTVAL (operands[2]))
19109 operands[3] = GEN_INT (63 - nb);
19110 if (dot)
19111 return "rld%I2c. %0,%1,%2,%3";
19112 return "rld%I2c %0,%1,%2,%3";
19115 if (nb < 32 && ne < 32)
19117 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19118 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19119 operands[3] = GEN_INT (31 - nb);
19120 operands[4] = GEN_INT (31 - ne);
19121 /* This insn can also be a 64-bit rotate with mask that really makes
19122 it just a shift right (with mask); the %h below are to adjust for
19123 that situation (shift count is >= 32 in that case). */
19124 if (dot)
19125 return "rlw%I2nm. %0,%1,%h2,%3,%4";
19126 return "rlw%I2nm %0,%1,%h2,%3,%4";
19129 gcc_unreachable ();
19132 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
19133 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
19134 ASHIFT, or LSHIFTRT) in mode MODE. */
19136 bool
19137 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
19139 int nb, ne;
19141 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
19142 return false;
19144 int n = GET_MODE_PRECISION (mode);
19146 int sh = INTVAL (XEXP (shift, 1));
19147 if (sh < 0 || sh >= n)
19148 return false;
19150 rtx_code code = GET_CODE (shift);
19152 /* Convert any shift by 0 to a rotate, to simplify the code below. */
19153 if (sh == 0)
19154 code = ROTATE;
19156 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19157 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
19158 code = ASHIFT;
19159 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
19161 code = LSHIFTRT;
19162 sh = n - sh;
19165 /* DImode rotates need rldimi. */
19166 if (mode == DImode && code == ROTATE)
19167 return (ne == sh);
19169 /* SImode rotates need rlwimi. */
19170 if (mode == SImode && code == ROTATE)
19171 return (nb < 32 && ne < 32 && sh < 32);
19173 /* Wrap-around masks are only okay for rotates. */
19174 if (ne > nb)
19175 return false;
19177 /* Don't allow ASHIFT if the mask is wrong for that. */
19178 if (code == ASHIFT && ne < sh)
19179 return false;
19181 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
19182 if the mask is wrong for that. */
19183 if (nb < 32 && ne < 32 && sh < 32
19184 && !(code == LSHIFTRT && nb >= 32 - sh))
19185 return true;
19187 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19188 if the mask is wrong for that. */
19189 if (code == LSHIFTRT)
19190 sh = 64 - sh;
19191 if (ne == sh)
19192 return !(code == LSHIFTRT && nb >= sh);
19194 return false;
19197 /* Return the instruction template for an insert with mask in mode MODE, with
19198 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19200 const char *
19201 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
19203 int nb, ne;
19205 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19206 gcc_unreachable ();
19208 /* Prefer rldimi because rlwimi is cracked. */
19209 if (TARGET_POWERPC64
19210 && (!dot || mode == DImode)
19211 && GET_CODE (operands[4]) != LSHIFTRT
19212 && ne == INTVAL (operands[2]))
19214 operands[3] = GEN_INT (63 - nb);
19215 if (dot)
19216 return "rldimi. %0,%1,%2,%3";
19217 return "rldimi %0,%1,%2,%3";
19220 if (nb < 32 && ne < 32)
19222 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19223 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19224 operands[3] = GEN_INT (31 - nb);
19225 operands[4] = GEN_INT (31 - ne);
19226 if (dot)
19227 return "rlwimi. %0,%1,%2,%3,%4";
19228 return "rlwimi %0,%1,%2,%3,%4";
19231 gcc_unreachable ();
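/* For example, inserting via (ashift:DI x 8) under mask 0xff00 gives
   nb = 15 and ne = 8 == INTVAL (operands[2]), so on TARGET_POWERPC64 the
   rldimi form is chosen with operands[3] = 63 - 15 = 48, printing as
   "rldimi %0,%1,8,48".  */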
19234 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19235 using two machine instructions. */
19237 bool
19238 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19240 /* There are two kinds of AND we can handle with two insns:
19241 1) those we can do with two rl* insns;
19242 2) ori[s];xori[s].
19244 We do not handle that last case yet. */
19246 /* If there is just one stretch of ones, we can do it. */
19247 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19248 return true;
19250 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19251 one insn, we can do the whole thing with two. */
19252 unsigned HOST_WIDE_INT val = INTVAL (c);
19253 unsigned HOST_WIDE_INT bit1 = val & -val;
19254 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19255 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19256 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19257 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
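/* Worked example: C = 0xff0ff is not a single stretch of ones, so the
   hole-filling arithmetic runs: bit1 = 0x1, bit2 = 0x100, val1 = 0xff000,
   bit3 = 0x1000, and val + bit3 - bit2 = 0xfffff, a valid single-insn
   mask.  The AND can then be done as 0xfffffffffffff0ff followed by
   0xfffff, whose intersection is the original 0xff0ff.  */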
19260 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19261 If EXPAND is true, split rotate-and-mask instructions we generate to
19262 their constituent parts as well (this is used during expand); if DOT
19263 is 1, make the last insn a record-form instruction clobbering the
19264 destination GPR and setting the CC reg (from operands[3]); if 2, set
19265 that GPR as well as the CC reg. */
19267 void
19268 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19270 gcc_assert (!(expand && dot));
19272 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19274 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19275 shift right. This generates better code than doing the masks without
19276 shifts, or shifting first right and then left. */
19277 int nb, ne;
19278 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19280 gcc_assert (mode == DImode);
19282 int shift = 63 - nb;
19283 if (expand)
19285 rtx tmp1 = gen_reg_rtx (DImode);
19286 rtx tmp2 = gen_reg_rtx (DImode);
19287 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19288 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19289 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19291 else
19293 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19294 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19295 emit_move_insn (operands[0], tmp);
19296 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19297 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19299 return;
19302 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19303 that does the rest. */
19304 unsigned HOST_WIDE_INT bit1 = val & -val;
19305 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19306 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19307 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19309 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19310 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19312 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19314 /* Two "no-rotate"-and-mask instructions, for SImode. */
19315 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19317 gcc_assert (mode == SImode);
19319 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19320 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19321 emit_move_insn (reg, tmp);
19322 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19323 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19324 return;
19327 gcc_assert (mode == DImode);
19329 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19330 insns; we have to do the first in SImode, because it wraps. */
19331 if (mask2 <= 0xffffffff
19332 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19334 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19335 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19336 GEN_INT (mask1));
19337 rtx reg_low = gen_lowpart (SImode, reg);
19338 emit_move_insn (reg_low, tmp);
19339 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19340 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19341 return;
19344 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19345 at the top end), rotate back and clear the other hole. */
19346 int right = exact_log2 (bit3);
19347 int left = 64 - right;
19349 /* Rotate the mask too. */
19350 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19352 if (expand)
19354 rtx tmp1 = gen_reg_rtx (DImode);
19355 rtx tmp2 = gen_reg_rtx (DImode);
19356 rtx tmp3 = gen_reg_rtx (DImode);
19357 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19358 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19359 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19360 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19362 else
19364 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19365 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19366 emit_move_insn (operands[0], tmp);
19367 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19368 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19369 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19373 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
19374 for lfq and stfq insns iff the registers are hard registers. */
19376 int
19377 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19379 /* We might have been passed a SUBREG. */
19380 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19381 return 0;
19383 /* We might have been passed non floating point registers. */
19384 if (!FP_REGNO_P (REGNO (reg1))
19385 || !FP_REGNO_P (REGNO (reg2)))
19386 return 0;
19388 return (REGNO (reg1) == REGNO (reg2) - 1);
19391 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19392 addr1 and addr2 must be in consecutive memory locations
19393 (addr2 == addr1 + 8). */
19395 int
19396 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19398 rtx addr1, addr2;
19399 unsigned int reg1, reg2;
19400 int offset1, offset2;
19402 /* The mems cannot be volatile. */
19403 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19404 return 0;
19406 addr1 = XEXP (mem1, 0);
19407 addr2 = XEXP (mem2, 0);
19409 /* Extract an offset (if used) from the first addr. */
19410 if (GET_CODE (addr1) == PLUS)
19412 /* If not a REG, return zero. */
19413 if (GET_CODE (XEXP (addr1, 0)) != REG)
19414 return 0;
19415 else
19417 reg1 = REGNO (XEXP (addr1, 0));
19418 /* The offset must be constant! */
19419 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19420 return 0;
19421 offset1 = INTVAL (XEXP (addr1, 1));
19424 else if (GET_CODE (addr1) != REG)
19425 return 0;
19426 else
19428 reg1 = REGNO (addr1);
19429 /* This was a simple (mem (reg)) expression. Offset is 0. */
19430 offset1 = 0;
19433 /* And now for the second addr. */
19434 if (GET_CODE (addr2) == PLUS)
19436 /* If not a REG, return zero. */
19437 if (GET_CODE (XEXP (addr2, 0)) != REG)
19438 return 0;
19439 else
19441 reg2 = REGNO (XEXP (addr2, 0));
19442 /* The offset must be constant. */
19443 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19444 return 0;
19445 offset2 = INTVAL (XEXP (addr2, 1));
19448 else if (GET_CODE (addr2) != REG)
19449 return 0;
19450 else
19452 reg2 = REGNO (addr2);
19453 /* This was a simple (mem (reg)) expression. Offset is 0. */
19454 offset2 = 0;
19457 /* Both of these must have the same base register. */
19458 if (reg1 != reg2)
19459 return 0;
19461 /* The offset for the second addr must be 8 more than the first addr. */
19462 if (offset2 != offset1 + 8)
19463 return 0;
19465 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19466 instructions. */
19467 return 1;
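/* For instance, the pair
     mem1 == (mem:DF (plus (reg r9) (const_int 16)))
     mem2 == (mem:DF (plus (reg r9) (const_int 24)))
   shares one base register with offsets differing by 8, so it qualifies;
   a different base register in mem2, or an offset of 20, would not.  */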
19470 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19471 need to use DDmode; in all other cases we can use the same mode. */
19472 static machine_mode
19473 rs6000_secondary_memory_needed_mode (machine_mode mode)
19475 if (lra_in_progress && mode == SDmode)
19476 return DDmode;
19477 return mode;
19480 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19481 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19482 only work on the traditional altivec registers, note if an altivec register
19483 was chosen. */
19485 static enum rs6000_reg_type
19486 register_to_reg_type (rtx reg, bool *is_altivec)
19488 HOST_WIDE_INT regno;
19489 enum reg_class rclass;
19491 if (GET_CODE (reg) == SUBREG)
19492 reg = SUBREG_REG (reg);
19494 if (!REG_P (reg))
19495 return NO_REG_TYPE;
19497 regno = REGNO (reg);
19498 if (regno >= FIRST_PSEUDO_REGISTER)
19500 if (!lra_in_progress && !reload_completed)
19501 return PSEUDO_REG_TYPE;
19503 regno = true_regnum (reg);
19504 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19505 return PSEUDO_REG_TYPE;
19508 gcc_assert (regno >= 0);
19510 if (is_altivec && ALTIVEC_REGNO_P (regno))
19511 *is_altivec = true;
19513 rclass = rs6000_regno_regclass[regno];
19514 return reg_class_to_reg_type[(int)rclass];
19517 /* Helper function to return the cost of adding a TOC entry address. */
19519 static inline int
19520 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19522 int ret;
19524 if (TARGET_CMODEL != CMODEL_SMALL)
19525 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19527 else
19528 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19530 return ret;
19533 /* Helper function for rs6000_secondary_reload to determine whether the memory
19534 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19535 needs reloading. Return negative if the memory is not handled by the memory
19536 helper functions, so that a different reload method should be tried; 0 if no
19537 additional instructions are needed; and positive to give the extra cost of
19538 the memory access. */
19540 static int
19541 rs6000_secondary_reload_memory (rtx addr,
19542 enum reg_class rclass,
19543 machine_mode mode)
19545 int extra_cost = 0;
19546 rtx reg, and_arg, plus_arg0, plus_arg1;
19547 addr_mask_type addr_mask;
19548 const char *type = NULL;
19549 const char *fail_msg = NULL;
19551 if (GPR_REG_CLASS_P (rclass))
19552 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19554 else if (rclass == FLOAT_REGS)
19555 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19557 else if (rclass == ALTIVEC_REGS)
19558 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19560 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19561 else if (rclass == VSX_REGS)
19562 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19563 & ~RELOAD_REG_AND_M16);
19565 /* If the register allocator hasn't made up its mind yet on the register
19566 class to use, settle on defaults to use. */
19567 else if (rclass == NO_REGS)
19569 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19570 & ~RELOAD_REG_AND_M16);
19572 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19573 addr_mask &= ~(RELOAD_REG_INDEXED
19574 | RELOAD_REG_PRE_INCDEC
19575 | RELOAD_REG_PRE_MODIFY);
19578 else
19579 addr_mask = 0;
19581 /* If the register isn't valid in this register class, just return now. */
19582 if ((addr_mask & RELOAD_REG_VALID) == 0)
19584 if (TARGET_DEBUG_ADDR)
19586 fprintf (stderr,
19587 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19588 "not valid in class\n",
19589 GET_MODE_NAME (mode), reg_class_names[rclass]);
19590 debug_rtx (addr);
19593 return -1;
19596 switch (GET_CODE (addr))
19598 /* Does the register class support auto update forms for this mode? We
19599 don't need a scratch register, since the powerpc only supports
19600 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19601 case PRE_INC:
19602 case PRE_DEC:
19603 reg = XEXP (addr, 0);
19604 if (!base_reg_operand (reg, GET_MODE (reg)))
19606 fail_msg = "no base register #1";
19607 extra_cost = -1;
19610 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19612 extra_cost = 1;
19613 type = "update";
19615 break;
19617 case PRE_MODIFY:
19618 reg = XEXP (addr, 0);
19619 plus_arg1 = XEXP (addr, 1);
19620 if (!base_reg_operand (reg, GET_MODE (reg))
19621 || GET_CODE (plus_arg1) != PLUS
19622 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19624 fail_msg = "bad PRE_MODIFY";
19625 extra_cost = -1;
19628 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19630 extra_cost = 1;
19631 type = "update";
19633 break;
19635 /* Do we need to simulate AND -16 to clear the bottom address bits used
19636 in VMX load/stores? Only allow the AND for vector sizes. */
19637 case AND:
19638 and_arg = XEXP (addr, 0);
19639 if (GET_MODE_SIZE (mode) != 16
19640 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19641 || INTVAL (XEXP (addr, 1)) != -16)
19643 fail_msg = "bad Altivec AND #1";
19644 extra_cost = -1;
19647 if (rclass != ALTIVEC_REGS)
19649 if (legitimate_indirect_address_p (and_arg, false))
19650 extra_cost = 1;
19652 else if (legitimate_indexed_address_p (and_arg, false))
19653 extra_cost = 2;
19655 else
19657 fail_msg = "bad Altivec AND #2";
19658 extra_cost = -1;
19661 type = "and";
19663 break;
19665 /* If this is an indirect address, make sure it is a base register. */
19666 case REG:
19667 case SUBREG:
19668 if (!legitimate_indirect_address_p (addr, false))
19670 extra_cost = 1;
19671 type = "move";
19673 break;
19675 /* If this is an indexed address, make sure the register class can handle
19676 indexed addresses for this mode. */
19677 case PLUS:
19678 plus_arg0 = XEXP (addr, 0);
19679 plus_arg1 = XEXP (addr, 1);
19681 /* (plus (plus (reg) (constant)) (constant)) is generated during
19682 push_reload processing, so handle it now. */
19683 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19685 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19687 extra_cost = 1;
19688 type = "offset";
19692 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19693 push_reload processing, so handle it now. */
19694 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19696 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19698 extra_cost = 1;
19699 type = "indexed #2";
19703 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19705 fail_msg = "no base register #2";
19706 extra_cost = -1;
19709 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19711 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19712 || !legitimate_indexed_address_p (addr, false))
19714 extra_cost = 1;
19715 type = "indexed";
19719 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19720 && CONST_INT_P (plus_arg1))
19722 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19724 extra_cost = 1;
19725 type = "vector d-form offset";
19729 /* Make sure the register class can handle offset addresses. */
19730 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19732 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19734 extra_cost = 1;
19735 type = "offset #2";
19739 else
19741 fail_msg = "bad PLUS";
19742 extra_cost = -1;
19745 break;
19747 case LO_SUM:
19748 /* Quad offsets are restricted and can't handle normal addresses. */
19749 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19751 extra_cost = -1;
19752 type = "vector d-form lo_sum";
19755 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19757 fail_msg = "bad LO_SUM";
19758 extra_cost = -1;
19761 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19763 extra_cost = 1;
19764 type = "lo_sum";
19766 break;
19768 /* Static addresses need to create a TOC entry. */
19769 case CONST:
19770 case SYMBOL_REF:
19771 case LABEL_REF:
19772 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19774 extra_cost = -1;
19775 type = "vector d-form lo_sum #2";
19778 else
19780 type = "address";
19781 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19783 break;
19785 /* TOC references look like offsetable memory. */
19786 case UNSPEC:
19787 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19789 fail_msg = "bad UNSPEC";
19790 extra_cost = -1;
19793 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19795 extra_cost = -1;
19796 type = "vector d-form lo_sum #3";
19799 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19801 extra_cost = 1;
19802 type = "toc reference";
19804 break;
19806 default:
19808 fail_msg = "bad address";
19809 extra_cost = -1;
19813 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19815 if (extra_cost < 0)
19816 fprintf (stderr,
19817 "rs6000_secondary_reload_memory error: mode = %s, "
19818 "class = %s, addr_mask = '%s', %s\n",
19819 GET_MODE_NAME (mode),
19820 reg_class_names[rclass],
19821 rs6000_debug_addr_mask (addr_mask, false),
19822 (fail_msg != NULL) ? fail_msg : "<bad address>");
19824 else
19825 fprintf (stderr,
19826 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19827 "addr_mask = '%s', extra cost = %d, %s\n",
19828 GET_MODE_NAME (mode),
19829 reg_class_names[rclass],
19830 rs6000_debug_addr_mask (addr_mask, false),
19831 extra_cost,
19832 (type) ? type : "<none>");
19834 debug_rtx (addr);
19837 return extra_cost;
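/* To give a feel for the costs above (illustrative, not exhaustive): an
   indexed address (plus (reg) (reg)) for a mode and class that support
   indexed addressing costs 0 extra insns; the push_reload-generated
   (plus (plus (reg) (const)) (const)) form costs 1 when the class lacks
   offset addressing; and a malformed PRE_MODIFY returns -1 so that reload
   falls back to another method.  */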
19840 /* Helper function for rs6000_secondary_reload to return true if a move to a
19841 different register class is really a simple move. */
19843 static bool
19844 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19845 enum rs6000_reg_type from_type,
19846 machine_mode mode)
19848 int size = GET_MODE_SIZE (mode);
19850 /* Add support for various direct moves available. In this function, we only
19851 look at cases where we don't need any extra registers, and one or more
19852 simple move insns are issued. Originally small integers are not allowed
19853 in FPR/VSX registers. Single precision binary floating is not a simple
19854 move because we need to convert to the single precision memory layout.
19855 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19856 need special direct move handling, which we do not support yet. */
19857 if (TARGET_DIRECT_MOVE
19858 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19859 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19861 if (TARGET_POWERPC64)
19863 /* ISA 2.07: MTVSRD or MFVSRD. */
19864 if (size == 8)
19865 return true;
19867 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19868 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19869 return true;
19872 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19873 if (TARGET_P8_VECTOR)
19875 if (mode == SImode)
19876 return true;
19878 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19879 return true;
19882 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19883 if (mode == SDmode)
19884 return true;
19887 /* Power6+: MFTGPR or MFFGPR. */
19888 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19889 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19890 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19891 return true;
19893 /* Move to/from SPR. */
19894 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19895 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19896 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19897 return true;
19899 return false;
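/* Examples (illustrative, given the ISA flags named above): a DImode move
   between a GPR and a VSX register on a 64-bit ISA 2.07 target is a simple
   move (one mtvsrd or mfvsrd); an SImode move needs only mtvsrwz/mfvsrwz on
   power8; but an SFmode move is never simple here, because the value must
   be converted between the register and memory formats.  */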
19902 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19903 special direct moves that involve allocating an extra register. Return true
19904 and fill in SRI with the insn code and extra cost if there is a helper
19905 function for the move, and false if there is not. */
19907 static bool
19908 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19909 enum rs6000_reg_type from_type,
19910 machine_mode mode,
19911 secondary_reload_info *sri,
19912 bool altivec_p)
19914 bool ret = false;
19915 enum insn_code icode = CODE_FOR_nothing;
19916 int cost = 0;
19917 int size = GET_MODE_SIZE (mode);
19919 if (TARGET_POWERPC64 && size == 16)
19921 /* Handle moving 128-bit values from GPRs to VSX registers on
19922 ISA 2.07 (power8, power9) when running in 64-bit mode using
19923 XXPERMDI to glue the two 64-bit values back together. */
19924 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19926 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19927 icode = reg_addr[mode].reload_vsx_gpr;
19930 /* Handle moving 128-bit values from VSX registers to GPRs on
19931 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19932 bottom 64-bit value. */
19933 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19935 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19936 icode = reg_addr[mode].reload_gpr_vsx;
19940 else if (TARGET_POWERPC64 && mode == SFmode)
19942 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19944 cost = 3; /* xscvdpspn, mfvsrd, and. */
19945 icode = reg_addr[mode].reload_gpr_vsx;
19948 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19950 cost = 2; /* mtvsrz, xscvspdpn. */
19951 icode = reg_addr[mode].reload_vsx_gpr;
19955 else if (!TARGET_POWERPC64 && size == 8)
19957 /* Handle moving 64-bit values from GPRs to floating point registers on
19958 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19959 32-bit values back together. Altivec register classes must be handled
19960 specially since a different instruction is used, and the secondary
19961 reload support requires a single instruction class in the scratch
19962 register constraint. However, right now TFmode is not allowed in
19963 Altivec registers, so the pattern will never match. */
19964 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19966 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19967 icode = reg_addr[mode].reload_fpr_gpr;
19971 if (icode != CODE_FOR_nothing)
19973 ret = true;
19974 if (sri)
19976 sri->icode = icode;
19977 sri->extra_cost = cost;
19981 return ret;
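/* E.g., a TImode move from GPRs to a VSX register on a 64-bit power8
   reports reload_vsx_gpr with an extra cost of 3, matching the two mtvsrd
   insns plus the xxpermdi used to glue the two halves together.  */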
19984 /* Return whether a move between two register classes can be done either
19985 directly (simple move) or via a pattern that uses a single extra temporary
19986 (using ISA 2.07's direct move in this case). */
19988 static bool
19989 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19990 enum rs6000_reg_type from_type,
19991 machine_mode mode,
19992 secondary_reload_info *sri,
19993 bool altivec_p)
19995 /* Fall back to load/store reloads if either type is not a register. */
19996 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19997 return false;
19999 /* If we haven't allocated registers yet, assume the move can be done for the
20000 standard register types. */
20001 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
20002 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
20003 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
20004 return true;
20006 /* A move within the same set of registers is a simple move for
20007 non-specialized registers. */
20008 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
20009 return true;
20011 /* Check whether a simple move can be done directly. */
20012 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
20014 if (sri)
20016 sri->icode = CODE_FOR_nothing;
20017 sri->extra_cost = 0;
20019 return true;
20022 /* Now check if we can do it in a few steps. */
20023 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
20024 altivec_p);
20027 /* Inform reload about cases where moving X with a mode MODE to a register in
20028 RCLASS requires an extra scratch or immediate register. Return the class
20029 needed for the immediate register.
20031 For VSX and Altivec, we may need a register to convert sp+offset into
20032 reg+sp.
20034 For misaligned 64-bit gpr loads and stores we need a register to
20035 convert an offset address to indirect. */
20037 static reg_class_t
20038 rs6000_secondary_reload (bool in_p,
20039 rtx x,
20040 reg_class_t rclass_i,
20041 machine_mode mode,
20042 secondary_reload_info *sri)
20044 enum reg_class rclass = (enum reg_class) rclass_i;
20045 reg_class_t ret = ALL_REGS;
20046 enum insn_code icode;
20047 bool default_p = false;
20048 bool done_p = false;
20050 /* Allow subreg of memory before/during reload. */
20051 bool memory_p = (MEM_P (x)
20052 || (!reload_completed && GET_CODE (x) == SUBREG
20053 && MEM_P (SUBREG_REG (x))));
20055 sri->icode = CODE_FOR_nothing;
20056 sri->t_icode = CODE_FOR_nothing;
20057 sri->extra_cost = 0;
20058 icode = ((in_p)
20059 ? reg_addr[mode].reload_load
20060 : reg_addr[mode].reload_store);
20062 if (REG_P (x) || register_operand (x, mode))
20064 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
20065 bool altivec_p = (rclass == ALTIVEC_REGS);
20066 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
20068 if (!in_p)
20069 std::swap (to_type, from_type);
20071 /* Can we do a direct move of some sort? */
20072 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
20073 altivec_p))
20075 icode = (enum insn_code)sri->icode;
20076 default_p = false;
20077 done_p = true;
20078 ret = NO_REGS;
20082 /* Make sure 0.0 is not reloaded or forced into memory. */
20083 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
20085 ret = NO_REGS;
20086 default_p = false;
20087 done_p = true;
20090 /* If this is a scalar floating point value and we want to load it into the
20091 traditional Altivec registers, do it via a traditional floating point
20092 register, unless we have D-form addressing. Also make sure that non-zero
20093 constants use an FPR. */
20094 if (!done_p && reg_addr[mode].scalar_in_vmx_p
20095 && !mode_supports_vmx_dform (mode)
20096 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20097 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
20099 ret = FLOAT_REGS;
20100 default_p = false;
20101 done_p = true;
20104 /* Handle reload of load/stores if we have reload helper functions. */
20105 if (!done_p && icode != CODE_FOR_nothing && memory_p)
20107 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
20108 mode);
20110 if (extra_cost >= 0)
20112 done_p = true;
20113 ret = NO_REGS;
20114 if (extra_cost > 0)
20116 sri->extra_cost = extra_cost;
20117 sri->icode = icode;
20122 /* Handle unaligned loads and stores of integer registers. */
20123 if (!done_p && TARGET_POWERPC64
20124 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20125 && memory_p
20126 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
20128 rtx addr = XEXP (x, 0);
20129 rtx off = address_offset (addr);
20131 if (off != NULL_RTX)
20133 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20134 unsigned HOST_WIDE_INT offset = INTVAL (off);
20136 /* We need a secondary reload when our legitimate_address_p
20137 says the address is good (as otherwise the entire address
20138 will be reloaded), and the offset is not a multiple of
20139 four or we have an address wrap. Address wrap will only
20140 occur for LO_SUMs since legitimate_offset_address_p
20141 rejects addresses for 16-byte mems that will wrap. */
20142 if (GET_CODE (addr) == LO_SUM
20143 ? (1 /* legitimate_address_p allows any offset for lo_sum */
20144 && ((offset & 3) != 0
20145 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
20146 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
20147 && (offset & 3) != 0))
20149 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
20150 if (in_p)
20151 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
20152 : CODE_FOR_reload_di_load);
20153 else
20154 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
20155 : CODE_FOR_reload_di_store);
20156 sri->extra_cost = 2;
20157 ret = NO_REGS;
20158 done_p = true;
20160 else
20161 default_p = true;
20163 else
20164 default_p = true;
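/* A worked example of the test above (illustrative): for a DImode access on
   a 64-bit target, extra == 0, and an offset of 2 passes
   legitimate_offset_address_p (2 + 0x8000 < 0x10000) but is not a multiple
   of 4, so the DS-form ld/std cannot encode it; we therefore request the
   reload_di_load/reload_di_store helper and a scratch register.  */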
20167 if (!done_p && !TARGET_POWERPC64
20168 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20169 && memory_p
20170 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
20172 rtx addr = XEXP (x, 0);
20173 rtx off = address_offset (addr);
20175 if (off != NULL_RTX)
20177 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20178 unsigned HOST_WIDE_INT offset = INTVAL (off);
20180 /* We need a secondary reload when our legitimate_address_p
20181 says the address is good (as otherwise the entire address
20182 will be reloaded), and we have a wrap.
20184 legitimate_lo_sum_address_p allows LO_SUM addresses to
20185 have any offset so test for wrap in the low 16 bits.
20187 legitimate_offset_address_p checks for the range
20188 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
20189 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
20190 [0x7ff4,0x7fff] respectively, so test for the
20191 intersection of these ranges, [0x7ffc,0x7fff] and
20192 [0x7ff4,0x7ff7] respectively.
20194 Note that the address we see here may have been
20195 manipulated by legitimize_reload_address. */
20196 if (GET_CODE (addr) == LO_SUM
20197 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
20198 : offset - (0x8000 - extra) < UNITS_PER_WORD)
20200 if (in_p)
20201 sri->icode = CODE_FOR_reload_si_load;
20202 else
20203 sri->icode = CODE_FOR_reload_si_store;
20204 sri->extra_cost = 2;
20205 ret = NO_REGS;
20206 done_p = true;
20208 else
20209 default_p = true;
20211 else
20212 default_p = true;
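/* A worked example of the wrap test above (illustrative): for a mode of
   size 8, extra == 4. An offset of 0x7ffc gives
   0x7ffc - (0x8000 - 4) == 0 < UNITS_PER_WORD, i.e. the second word would
   sit at 0x8000, out of 16-bit signed range, so the reload_si helpers are
   needed; an offset of 0x7ff8 computes 0x7ff8 - 0x7ffc, which wraps to a
   huge unsigned value and correctly fails the test.  */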
20215 if (!done_p)
20216 default_p = true;
20218 if (default_p)
20219 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20221 gcc_assert (ret != ALL_REGS);
20223 if (TARGET_DEBUG_ADDR)
20225 fprintf (stderr,
20226 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20227 "mode = %s",
20228 reg_class_names[ret],
20229 in_p ? "true" : "false",
20230 reg_class_names[rclass],
20231 GET_MODE_NAME (mode));
20233 if (reload_completed)
20234 fputs (", after reload", stderr);
20236 if (!done_p)
20237 fputs (", done_p not set", stderr);
20239 if (default_p)
20240 fputs (", default secondary reload", stderr);
20242 if (sri->icode != CODE_FOR_nothing)
20243 fprintf (stderr, ", reload func = %s, extra cost = %d",
20244 insn_data[sri->icode].name, sri->extra_cost);
20246 else if (sri->extra_cost > 0)
20247 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20249 fputs ("\n", stderr);
20250 debug_rtx (x);
20253 return ret;
20256 /* Better tracing for rs6000_secondary_reload_inner. */
20258 static void
20259 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20260 bool store_p)
20262 rtx set, clobber;
20264 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20266 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20267 store_p ? "store" : "load");
20269 if (store_p)
20270 set = gen_rtx_SET (mem, reg);
20271 else
20272 set = gen_rtx_SET (reg, mem);
20274 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20275 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20278 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20279 ATTRIBUTE_NORETURN;
20281 static void
20282 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20283 bool store_p)
20285 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20286 gcc_unreachable ();
20289 /* Fix up reload addresses for values in GPR, FPR, and VMX registers that have
20290 reload helper functions. These were identified in
20291 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20292 reload, it calls the insns:
20293 reload_<RELOAD:mode>_<P:mptrsize>_store
20294 reload_<RELOAD:mode>_<P:mptrsize>_load
20296 which in turn call this function to do whatever is necessary to create
20297 valid addresses. */
20299 void
20300 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20302 int regno = true_regnum (reg);
20303 machine_mode mode = GET_MODE (reg);
20304 addr_mask_type addr_mask;
20305 rtx addr;
20306 rtx new_addr;
20307 rtx op_reg, op0, op1;
20308 rtx and_op;
20309 rtx cc_clobber;
20310 rtvec rv;
20312 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20313 || !base_reg_operand (scratch, GET_MODE (scratch)))
20314 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20316 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20317 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20319 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20320 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20322 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20323 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20325 else
20326 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20328 /* Make sure the mode is valid in this register class. */
20329 if ((addr_mask & RELOAD_REG_VALID) == 0)
20330 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20332 if (TARGET_DEBUG_ADDR)
20333 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20335 new_addr = addr = XEXP (mem, 0);
20336 switch (GET_CODE (addr))
20338 /* Does the register class support auto update forms for this mode? If
20339 not, do the update now. We don't need a scratch register, since the
20340 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20341 case PRE_INC:
20342 case PRE_DEC:
20343 op_reg = XEXP (addr, 0);
20344 if (!base_reg_operand (op_reg, Pmode))
20345 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20347 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20349 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_CODE (addr) == PRE_DEC ? -GET_MODE_SIZE (mode) : GET_MODE_SIZE (mode))));
20350 new_addr = op_reg;
20352 break;
20354 case PRE_MODIFY:
20355 op0 = XEXP (addr, 0);
20356 op1 = XEXP (addr, 1);
20357 if (!base_reg_operand (op0, Pmode)
20358 || GET_CODE (op1) != PLUS
20359 || !rtx_equal_p (op0, XEXP (op1, 0)))
20360 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20362 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20364 emit_insn (gen_rtx_SET (op0, op1));
20365 new_addr = op0;
20367 break;
20369 /* Do we need to simulate AND -16 to clear the bottom address bits used
20370 in VMX load/stores? */
20371 case AND:
20372 op0 = XEXP (addr, 0);
20373 op1 = XEXP (addr, 1);
20374 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20376 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20377 op_reg = op0;
20379 else if (GET_CODE (op0) == PLUS)
20381 emit_insn (gen_rtx_SET (scratch, op0));
20382 op_reg = scratch;
20385 else
20386 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20388 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20389 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20390 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20391 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20392 new_addr = scratch;
20394 break;
20396 /* If this is an indirect address, make sure it is a base register. */
20397 case REG:
20398 case SUBREG:
20399 if (!base_reg_operand (addr, GET_MODE (addr)))
20401 emit_insn (gen_rtx_SET (scratch, addr));
20402 new_addr = scratch;
20404 break;
20406 /* If this is an indexed address, make sure the register class can handle
20407 indexed addresses for this mode. */
20408 case PLUS:
20409 op0 = XEXP (addr, 0);
20410 op1 = XEXP (addr, 1);
20411 if (!base_reg_operand (op0, Pmode))
20412 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20414 else if (int_reg_operand (op1, Pmode))
20416 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20418 emit_insn (gen_rtx_SET (scratch, addr));
20419 new_addr = scratch;
20423 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20425 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20426 || !quad_address_p (addr, mode, false))
20428 emit_insn (gen_rtx_SET (scratch, addr));
20429 new_addr = scratch;
20433 /* Make sure the register class can handle offset addresses. */
20434 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20436 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20438 emit_insn (gen_rtx_SET (scratch, addr));
20439 new_addr = scratch;
20443 else
20444 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20446 break;
20448 case LO_SUM:
20449 op0 = XEXP (addr, 0);
20450 op1 = XEXP (addr, 1);
20451 if (!base_reg_operand (op0, Pmode))
20452 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20454 else if (int_reg_operand (op1, Pmode))
20456 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20458 emit_insn (gen_rtx_SET (scratch, addr));
20459 new_addr = scratch;
20463 /* Quad offsets are restricted and can't handle normal addresses. */
20464 else if (mode_supports_vsx_dform_quad (mode))
20466 emit_insn (gen_rtx_SET (scratch, addr));
20467 new_addr = scratch;
20470 /* Make sure the register class can handle offset addresses. */
20471 else if (legitimate_lo_sum_address_p (mode, addr, false))
20473 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20475 emit_insn (gen_rtx_SET (scratch, addr));
20476 new_addr = scratch;
20480 else
20481 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20483 break;
20485 case SYMBOL_REF:
20486 case CONST:
20487 case LABEL_REF:
20488 rs6000_emit_move (scratch, addr, Pmode);
20489 new_addr = scratch;
20490 break;
20492 default:
20493 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20496 /* Adjust the address if it changed. */
20497 if (addr != new_addr)
20499 mem = replace_equiv_address_nv (mem, new_addr);
20500 if (TARGET_DEBUG_ADDR)
20501 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20504 /* Now create the move. */
20505 if (store_p)
20506 emit_insn (gen_rtx_SET (mem, reg));
20507 else
20508 emit_insn (gen_rtx_SET (reg, mem));
20510 return;
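/* As an illustration of the AND case above: reloading a vector load from
   (and (plus (reg r9) (reg r10)) (const_int -16)) for a register class
   whose addr_mask lacks RELOAD_REG_AND_M16 first computes r9+r10 into the
   scratch register, then ANDs it with -16 there (with the CC clobber the
   and pattern requires), and finally rewrites the mem to use the scratch
   as a plain indirect address.  */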
20513 /* Convert reloads involving 64-bit gprs and misaligned offset
20514 addressing, or multiple 32-bit gprs and offsets that are too large,
20515 to use indirect addressing. */
20517 void
20518 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20520 int regno = true_regnum (reg);
20521 enum reg_class rclass;
20522 rtx addr;
20523 rtx scratch_or_premodify = scratch;
20525 if (TARGET_DEBUG_ADDR)
20527 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20528 store_p ? "store" : "load");
20529 fprintf (stderr, "reg:\n");
20530 debug_rtx (reg);
20531 fprintf (stderr, "mem:\n");
20532 debug_rtx (mem);
20533 fprintf (stderr, "scratch:\n");
20534 debug_rtx (scratch);
20537 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20538 gcc_assert (GET_CODE (mem) == MEM);
20539 rclass = REGNO_REG_CLASS (regno);
20540 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20541 addr = XEXP (mem, 0);
20543 if (GET_CODE (addr) == PRE_MODIFY)
20545 gcc_assert (REG_P (XEXP (addr, 0))
20546 && GET_CODE (XEXP (addr, 1)) == PLUS
20547 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20548 scratch_or_premodify = XEXP (addr, 0);
20549 if (!HARD_REGISTER_P (scratch_or_premodify))
20550 /* If we have a pseudo here then reload will have arranged
20551 to have it replaced, but only in the original insn.
20552 Use the replacement here too. */
20553 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20555 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20556 expressions from the original insn, without unsharing them.
20557 Any RTL that points into the original insn will of course
20558 have register replacements applied. That is why we don't
20559 need to look for replacements under the PLUS. */
20560 addr = XEXP (addr, 1);
20562 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20564 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20566 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20568 /* Now create the move. */
20569 if (store_p)
20570 emit_insn (gen_rtx_SET (mem, reg));
20571 else
20572 emit_insn (gen_rtx_SET (reg, mem));
20574 return;
20577 /* Given an rtx X being reloaded into a reg required to be
20578 in class CLASS, return the class of reg to actually use.
20579 In general this is just CLASS; but on some machines
20580 in some cases it is preferable to use a more restrictive class.
20582 On the RS/6000, we have to return NO_REGS when we want to reload a
20583 floating-point CONST_DOUBLE to force it to be copied to memory.
20585 We also don't want to reload integer values into floating-point
20586 registers if we can at all help it. In fact, this can
20587 cause reload to die, if it tries to generate a reload of CTR
20588 into a FP register and discovers it doesn't have the memory location
20589 required.
20591 ??? Would it be a good idea to have reload do the converse, that is
20592 try to reload floating modes into FP registers if possible?
20595 static enum reg_class
20596 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20598 machine_mode mode = GET_MODE (x);
20599 bool is_constant = CONSTANT_P (x);
20601 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20602 reload class for it. */
20603 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20604 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20605 return NO_REGS;
20607 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20608 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20609 return NO_REGS;
20611 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20612 the reloading of address expressions using PLUS into floating point
20613 registers. */
20614 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20616 if (is_constant)
20618 /* Zero is always allowed in all VSX registers. */
20619 if (x == CONST0_RTX (mode))
20620 return rclass;
20622 /* If this is a vector constant that can be formed with a few Altivec
20623 instructions, we want altivec registers. */
20624 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20625 return ALTIVEC_REGS;
20627 /* If this is an integer constant that can easily be loaded into
20628 vector registers, allow it. */
20629 if (CONST_INT_P (x))
20631 HOST_WIDE_INT value = INTVAL (x);
20633 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20634 2.06 can generate it in the Altivec registers with
20635 VSPLTI<x>. */
20636 if (value == -1)
20638 if (TARGET_P8_VECTOR)
20639 return rclass;
20640 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20641 return ALTIVEC_REGS;
20642 else
20643 return NO_REGS;
20646 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20647 a sign extend in the Altivec registers. */
20648 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20649 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20650 return ALTIVEC_REGS;
20653 /* Force constant to memory. */
20654 return NO_REGS;
20657 /* D-form addressing can easily reload the value. */
20658 if (mode_supports_vmx_dform (mode)
20659 || mode_supports_vsx_dform_quad (mode))
20660 return rclass;
20662 /* If this is a scalar floating point value and we don't have D-form
20663 addressing, prefer the traditional floating point registers so that we
20664 can use D-form (register+offset) addressing. */
20665 if (rclass == VSX_REGS
20666 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20667 return FLOAT_REGS;
20669 /* Prefer the Altivec registers if Altivec is handling the vector
20670 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20671 loads. */
20672 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20673 || mode == V1TImode)
20674 return ALTIVEC_REGS;
20676 return rclass;
20679 if (is_constant || GET_CODE (x) == PLUS)
20681 if (reg_class_subset_p (GENERAL_REGS, rclass))
20682 return GENERAL_REGS;
20683 if (reg_class_subset_p (BASE_REGS, rclass))
20684 return BASE_REGS;
20685 return NO_REGS;
20688 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20689 return GENERAL_REGS;
20691 return rclass;
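/* A few concrete outcomes of the logic above (illustrative): CONST_INT -1
   requested in VSX_REGS stays in VSX_REGS on an ISA 2.07 target (XXLORC can
   generate it anywhere) but narrows to ALTIVEC_REGS on ISA 2.06; an SFmode
   value requested in VSX_REGS without D-form vector addressing is steered
   to FLOAT_REGS so reg+offset loads remain available; and most other
   constants get NO_REGS, forcing them to memory.  */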
20694 /* Debug version of rs6000_preferred_reload_class. */
20695 static enum reg_class
20696 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20698 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20700 fprintf (stderr,
20701 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20702 "mode = %s, x:\n",
20703 reg_class_names[ret], reg_class_names[rclass],
20704 GET_MODE_NAME (GET_MODE (x)));
20705 debug_rtx (x);
20707 return ret;
20710 /* If we are copying between FP or AltiVec registers and anything else, we need
20711 a memory location. The exception is when we are targeting ppc64 and the
20712 direct moves between the FPRs and GPRs are available. Also, under VSX, you
20713 can copy vector registers from the FP register set to the Altivec register
20714 set and vice versa. */
20716 static bool
20717 rs6000_secondary_memory_needed (machine_mode mode,
20718 reg_class_t from_class,
20719 reg_class_t to_class)
20721 enum rs6000_reg_type from_type, to_type;
20722 bool altivec_p = ((from_class == ALTIVEC_REGS)
20723 || (to_class == ALTIVEC_REGS));
20725 /* If a simple/direct move is available, we don't need secondary memory. */
20726 from_type = reg_class_to_reg_type[(int)from_class];
20727 to_type = reg_class_to_reg_type[(int)to_class];
20729 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20730 (secondary_reload_info *)0, altivec_p))
20731 return false;
20733 /* If we have a floating point or vector register class, we need to use
20734 memory to transfer the data. */
20735 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20736 return true;
20738 return false;
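/* For example, copying DFmode between GENERAL_REGS and FLOAT_REGS on a
   32-bit or pre-power8 target has no direct move, so this returns true and
   the copy goes through a stack slot; on a 64-bit power8 the same copy is a
   single mtvsrd/mfvsrd and this returns false.  */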
20741 /* Debug version of rs6000_secondary_memory_needed. */
20742 static bool
20743 rs6000_debug_secondary_memory_needed (machine_mode mode,
20744 reg_class_t from_class,
20745 reg_class_t to_class)
20747 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20749 fprintf (stderr,
20750 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20751 "to_class = %s, mode = %s\n",
20752 ret ? "true" : "false",
20753 reg_class_names[from_class],
20754 reg_class_names[to_class],
20755 GET_MODE_NAME (mode));
20757 return ret;
20760 /* Return the register class of a scratch register needed to copy IN into
20761 or out of a register in RCLASS in MODE. If it can be done directly,
20762 NO_REGS is returned. */
20764 static enum reg_class
20765 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20766 rtx in)
20768 int regno;
20770 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20771 #if TARGET_MACHO
20772 && MACHOPIC_INDIRECT
20773 #endif
20776 /* We cannot copy a symbolic operand directly into anything
20777 other than BASE_REGS for TARGET_ELF. So indicate that a
20778 register from BASE_REGS is needed as an intermediate
20779 register.
20781 On Darwin, pic addresses require a load from memory, which
20782 needs a base register. */
20783 if (rclass != BASE_REGS
20784 && (GET_CODE (in) == SYMBOL_REF
20785 || GET_CODE (in) == HIGH
20786 || GET_CODE (in) == LABEL_REF
20787 || GET_CODE (in) == CONST))
20788 return BASE_REGS;
20791 if (GET_CODE (in) == REG)
20793 regno = REGNO (in);
20794 if (regno >= FIRST_PSEUDO_REGISTER)
20796 regno = true_regnum (in);
20797 if (regno >= FIRST_PSEUDO_REGISTER)
20798 regno = -1;
20801 else if (GET_CODE (in) == SUBREG)
20803 regno = true_regnum (in);
20804 if (regno >= FIRST_PSEUDO_REGISTER)
20805 regno = -1;
20807 else
20808 regno = -1;
20810 /* If we have VSX register moves, prefer moving scalar values between
20811 Altivec registers and GPR by going via an FPR (and then via memory)
20812 instead of reloading the secondary memory address for Altivec moves. */
20813 if (TARGET_VSX
20814 && GET_MODE_SIZE (mode) < 16
20815 && !mode_supports_vmx_dform (mode)
20816 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20817 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20818 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20819 && (regno >= 0 && INT_REGNO_P (regno)))))
20820 return FLOAT_REGS;
20822 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20823 into anything. */
20824 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20825 || (regno >= 0 && INT_REGNO_P (regno)))
20826 return NO_REGS;
20828 /* Constants, memory, and VSX registers can go into VSX registers (both the
20829 traditional floating point and the altivec registers). */
20830 if (rclass == VSX_REGS
20831 && (regno == -1 || VSX_REGNO_P (regno)))
20832 return NO_REGS;
20834 /* Constants, memory, and FP registers can go into FP registers. */
20835 if ((regno == -1 || FP_REGNO_P (regno))
20836 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20837 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20839 /* Memory, and AltiVec registers can go into AltiVec registers. */
20840 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20841 && rclass == ALTIVEC_REGS)
20842 return NO_REGS;
20844 /* We can copy among the CR registers. */
20845 if ((rclass == CR_REGS || rclass == CR0_REGS)
20846 && regno >= 0 && CR_REGNO_P (regno))
20847 return NO_REGS;
20849 /* Otherwise, we need GENERAL_REGS. */
20850 return GENERAL_REGS;
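/* Illustrative cases: copying (symbol_ref "x") into FLOAT_REGS on ELF needs
   a BASE_REGS intermediate to build the address; an SImode copy between an
   Altivec register and a GPR (when the mode lacks D-form vector addressing)
   is routed through FLOAT_REGS by the check above; and a GPR-to-GPR copy
   needs no scratch at all, so NO_REGS is returned.  */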
20853 /* Debug version of rs6000_secondary_reload_class. */
20854 static enum reg_class
20855 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20856 machine_mode mode, rtx in)
20858 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20859 fprintf (stderr,
20860 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20861 "mode = %s, input rtx:\n",
20862 reg_class_names[ret], reg_class_names[rclass],
20863 GET_MODE_NAME (mode));
20864 debug_rtx (in);
20866 return ret;
20869 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20871 static bool
20872 rs6000_can_change_mode_class (machine_mode from,
20873 machine_mode to,
20874 reg_class_t rclass)
20876 unsigned from_size = GET_MODE_SIZE (from);
20877 unsigned to_size = GET_MODE_SIZE (to);
20879 if (from_size != to_size)
20881 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20883 if (reg_classes_intersect_p (xclass, rclass))
20885 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20886 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20887 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20888 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20890 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20891 single register under VSX because the scalar part of the register
20892 is in the upper 64-bits, and not the lower 64-bits. Types like
20893 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20894 IEEE floating point can't overlap, and neither can small
20895 values. */
20897 if (to_float128_vector_p && from_float128_vector_p)
20898 return true;
20900 else if (to_float128_vector_p || from_float128_vector_p)
20901 return false;
20903 /* TDmode in floating-mode registers must always go into a register
20904 pair with the most significant word in the even-numbered register
20905 to match ISA requirements. In little-endian mode, this does not
20906 match subreg numbering, so we cannot allow subregs. */
20907 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20908 return false;
20910 if (from_size < 8 || to_size < 8)
20911 return false;
20913 if (from_size == 8 && (8 * to_nregs) != to_size)
20914 return false;
20916 if (to_size == 8 && (8 * from_nregs) != from_size)
20917 return false;
20919 return true;
20921 else
20922 return true;
20925 /* Since the VSX register set includes traditional floating point registers
20926 and altivec registers, just check for the size being different instead of
20927 trying to check whether the modes are vector modes. Otherwise it won't
20928 allow say DF and DI to change classes. For types like TFmode and TDmode
20929 that take 2 64-bit registers, rather than a single 128-bit register, don't
20930 allow subregs of those types to other 128 bit types. */
20931 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20933 unsigned num_regs = (from_size + 15) / 16;
20934 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20935 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20936 return false;
20938 return (from_size == 8 || from_size == 16);
20941 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20942 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20943 return false;
20945 return true;
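/* Examples under VSX (illustrative): DImode <-> DFmode subregs are allowed
   in VSX registers, since both occupy one 8-byte register; but a DImode
   subreg of a 128-bit IEEE scalar such as KFmode is rejected, because the
   scalar lives in the upper 64 bits of the register rather than where the
   subreg rules expect it.  */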
20948 /* Debug version of rs6000_can_change_mode_class. */
20949 static bool
20950 rs6000_debug_can_change_mode_class (machine_mode from,
20951 machine_mode to,
20952 reg_class_t rclass)
20954 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20956 fprintf (stderr,
20957 "rs6000_can_change_mode_class, return %s, from = %s, "
20958 "to = %s, rclass = %s\n",
20959 ret ? "true" : "false",
20960 GET_MODE_NAME (from), GET_MODE_NAME (to),
20961 reg_class_names[rclass]);
20963 return ret;
20966 /* Return a string to do a move operation of 128 bits of data. */
20968 const char *
20969 rs6000_output_move_128bit (rtx operands[])
20971 rtx dest = operands[0];
20972 rtx src = operands[1];
20973 machine_mode mode = GET_MODE (dest);
20974 int dest_regno;
20975 int src_regno;
20976 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20977 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20979 if (REG_P (dest))
20981 dest_regno = REGNO (dest);
20982 dest_gpr_p = INT_REGNO_P (dest_regno);
20983 dest_fp_p = FP_REGNO_P (dest_regno);
20984 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20985 dest_vsx_p = dest_fp_p | dest_vmx_p;
20987 else
20989 dest_regno = -1;
20990 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20993 if (REG_P (src))
20995 src_regno = REGNO (src);
20996 src_gpr_p = INT_REGNO_P (src_regno);
20997 src_fp_p = FP_REGNO_P (src_regno);
20998 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20999 src_vsx_p = src_fp_p | src_vmx_p;
21001 else
21003 src_regno = -1;
21004 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
21007 /* Register moves. */
21008 if (dest_regno >= 0 && src_regno >= 0)
21010 if (dest_gpr_p)
21012 if (src_gpr_p)
21013 return "#";
21015 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
21016 return (WORDS_BIG_ENDIAN
21017 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
21018 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
21020 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
21021 return "#";
21024 else if (TARGET_VSX && dest_vsx_p)
21026 if (src_vsx_p)
21027 return "xxlor %x0,%x1,%x1";
21029 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
21030 return (WORDS_BIG_ENDIAN
21031 ? "mtvsrdd %x0,%1,%L1"
21032 : "mtvsrdd %x0,%L1,%1");
21034 else if (TARGET_DIRECT_MOVE && src_gpr_p)
21035 return "#";
21038 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
21039 return "vor %0,%1,%1";
21041 else if (dest_fp_p && src_fp_p)
21042 return "#";
21045 /* Loads. */
21046 else if (dest_regno >= 0 && MEM_P (src))
21048 if (dest_gpr_p)
21050 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
21051 return "lq %0,%1";
21052 else
21053 return "#";
21056 else if (TARGET_ALTIVEC && dest_vmx_p
21057 && altivec_indexed_or_indirect_operand (src, mode))
21058 return "lvx %0,%y1";
21060 else if (TARGET_VSX && dest_vsx_p)
21062 if (mode_supports_vsx_dform_quad (mode)
21063 && quad_address_p (XEXP (src, 0), mode, true))
21064 return "lxv %x0,%1";
21066 else if (TARGET_P9_VECTOR)
21067 return "lxvx %x0,%y1";
21069 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
21070 return "lxvw4x %x0,%y1";
21072 else
21073 return "lxvd2x %x0,%y1";
21076 else if (TARGET_ALTIVEC && dest_vmx_p)
21077 return "lvx %0,%y1";
21079 else if (dest_fp_p)
21080 return "#";
21083 /* Stores. */
21084 else if (src_regno >= 0 && MEM_P (dest))
21086 if (src_gpr_p)
21088 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
21089 return "stq %1,%0";
21090 else
21091 return "#";
21094 else if (TARGET_ALTIVEC && src_vmx_p
21095 && altivec_indexed_or_indirect_operand (dest, mode))
21096 return "stvx %1,%y0";
21098 else if (TARGET_VSX && src_vsx_p)
21100 if (mode_supports_vsx_dform_quad (mode)
21101 && quad_address_p (XEXP (dest, 0), mode, true))
21102 return "stxv %x1,%0";
21104 else if (TARGET_P9_VECTOR)
21105 return "stxvx %x1,%y0";
21107 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
21108 return "stxvw4x %x1,%y0";
21110 else
21111 return "stxvd2x %x1,%y0";
21114 else if (TARGET_ALTIVEC && src_vmx_p)
21115 return "stvx %1,%y0";
21117 else if (src_fp_p)
21118 return "#";
21121 /* Constants. */
21122 else if (dest_regno >= 0
21123 && (GET_CODE (src) == CONST_INT
21124 || GET_CODE (src) == CONST_WIDE_INT
21125 || GET_CODE (src) == CONST_DOUBLE
21126 || GET_CODE (src) == CONST_VECTOR))
21128 if (dest_gpr_p)
21129 return "#";
21131 else if ((dest_vmx_p && TARGET_ALTIVEC)
21132 || (dest_vsx_p && TARGET_VSX))
21133 return output_vec_const_move (operands);
21136 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
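/* Some sample selections from the code above (illustrative): a V2DImode
   VSX-to-VSX copy yields "xxlor %x0,%x1,%x1"; a GPR-to-GPR copy yields "#"
   so that the move is split into word moves after reload; and a load into a
   VSX register on power9 without a quad d-form address uses the indexed
   "lxvx %x0,%y1".  */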
21139 /* Validate a 128-bit move. */
21140 bool
21141 rs6000_move_128bit_ok_p (rtx operands[])
21143 machine_mode mode = GET_MODE (operands[0]);
21144 return (gpc_reg_operand (operands[0], mode)
21145 || gpc_reg_operand (operands[1], mode));
21148 /* Return true if a 128-bit move needs to be split. */
21149 bool
21150 rs6000_split_128bit_ok_p (rtx operands[])
21152 if (!reload_completed)
21153 return false;
21155 if (!gpr_or_gpr_p (operands[0], operands[1]))
21156 return false;
21158 if (quad_load_store_p (operands[0], operands[1]))
21159 return false;
21161 return true;
21165 /* Given a comparison operation, return the bit number in CCR to test. We
21166 know this is a valid comparison.
21168 SCC_P is 1 if this is for an scc. That means that %D will have been
21169 used instead of %C, so the bits will be in different places.
21171 Return -1 if OP isn't a valid comparison for some reason. */
21174 ccr_bit (rtx op, int scc_p)
21176 enum rtx_code code = GET_CODE (op);
21177 machine_mode cc_mode;
21178 int cc_regnum;
21179 int base_bit;
21180 rtx reg;
21182 if (!COMPARISON_P (op))
21183 return -1;
21185 reg = XEXP (op, 0);
21187 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
21189 cc_mode = GET_MODE (reg);
21190 cc_regnum = REGNO (reg);
21191 base_bit = 4 * (cc_regnum - CR0_REGNO);
21193 validate_condition_mode (code, cc_mode);
21195 /* When generating a sCOND operation, only positive conditions are
21196 allowed. */
21197 gcc_assert (!scc_p
21198 || code == EQ || code == GT || code == LT || code == UNORDERED
21199 || code == GTU || code == LTU);
21201 switch (code)
21203 case NE:
21204 return scc_p ? base_bit + 3 : base_bit + 2;
21205 case EQ:
21206 return base_bit + 2;
21207 case GT: case GTU: case UNLE:
21208 return base_bit + 1;
21209 case LT: case LTU: case UNGE:
21210 return base_bit;
21211 case ORDERED: case UNORDERED:
21212 return base_bit + 3;
21214 case GE: case GEU:
21215 /* If scc, we will have done a cror to put the bit in the
21216 unordered position. So test that bit. For integer, this is ! LT
21217 unless this is an scc insn. */
21218 return scc_p ? base_bit + 3 : base_bit;
21220 case LE: case LEU:
21221 return scc_p ? base_bit + 3 : base_bit + 1;
21223 default:
21224 gcc_unreachable ();
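/* A worked example (assuming the conventional rs6000 numbering, where CR0
   is hard register 68): for op == (gt (reg:CC 69) (const_int 0)), base_bit
   is 4 * (69 - 68) == 4, so a plain branch tests CCR bit 5 (GT), while an
   scc GE tests base_bit + 3 because a cror has parked the result in the
   unordered bit.  */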
21228 /* Return the GOT register. */
21231 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21233 /* The second flow pass currently (June 1999) can't update
21234 regs_ever_live without disturbing other parts of the compiler, so
21235 update it here to make the prolog/epilogue code happy. */
21236 if (!can_create_pseudo_p ()
21237 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21238 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21240 crtl->uses_pic_offset_table = 1;
21242 return pic_offset_table_rtx;
21245 static rs6000_stack_t stack_info;
21247 /* Function to init struct machine_function.
21248 This will be called, via a pointer variable,
21249 from push_function_context. */
21251 static struct machine_function *
21252 rs6000_init_machine_status (void)
21254 stack_info.reload_completed = 0;
21255 return ggc_cleared_alloc<machine_function> ();
21258 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21260 /* Write out a function code label. */
21262 void
21263 rs6000_output_function_entry (FILE *file, const char *fname)
21265 if (fname[0] != '.')
21267 switch (DEFAULT_ABI)
21269 default:
21270 gcc_unreachable ();
21272 case ABI_AIX:
21273 if (DOT_SYMBOLS)
21274 putc ('.', file);
21275 else
21276 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21277 break;
21279 case ABI_ELFv2:
21280 case ABI_V4:
21281 case ABI_DARWIN:
21282 break;
21286 RS6000_OUTPUT_BASENAME (file, fname);
21289 /* Print an operand. Recognize special options, documented below. */
21291 #if TARGET_ELF
21292 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21293 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21294 #else
21295 #define SMALL_DATA_RELOC "sda21"
21296 #define SMALL_DATA_REG 0
21297 #endif
21299 void
21300 print_operand (FILE *file, rtx x, int code)
21302 int i;
21303 unsigned HOST_WIDE_INT uval;
21305 switch (code)
21307 /* %a is output_address. */
21309 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21310 output_operand. */
21312 case 'D':
21313 /* Like 'J' but get to the GT bit only. */
21314 gcc_assert (REG_P (x));
21316 /* Bit 1 is GT bit. */
21317 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21319 /* Add one for shift count in rlinm for scc. */
21320 fprintf (file, "%d", i + 1);
21321 return;
21323 case 'e':
21324 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21325 if (! INT_P (x))
21327 output_operand_lossage ("invalid %%e value");
21328 return;
21331 uval = INTVAL (x);
21332 if ((uval & 0xffff) == 0 && uval != 0)
21333 putc ('s', file);
21334 return;
21336 case 'E':
21337 /* X is a CR register. Print the number of the EQ bit of the CR. */
21338 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21339 output_operand_lossage ("invalid %%E value");
21340 else
21341 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21342 return;
21344 case 'f':
21345 /* X is a CR register. Print the shift count needed to move it
21346 to the high-order four bits. */
21347 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21348 output_operand_lossage ("invalid %%f value");
21349 else
21350 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21351 return;
21353 case 'F':
21354 /* Similar, but print the count for the rotate in the opposite
21355 direction. */
21356 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21357 output_operand_lossage ("invalid %%F value");
21358 else
21359 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21360 return;
21362 case 'G':
21363 /* X is a constant integer. If it is negative, print "m",
21364 otherwise print "z". This is to make an aze or ame insn. */
21365 if (GET_CODE (x) != CONST_INT)
21366 output_operand_lossage ("invalid %%G value");
21367 else if (INTVAL (x) >= 0)
21368 putc ('z', file);
21369 else
21370 putc ('m', file);
21371 return;
21373 case 'h':
21374 /* If constant, output low-order five bits. Otherwise, write
21375 normally. */
21376 if (INT_P (x))
21377 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21378 else
21379 print_operand (file, x, 0);
21380 return;
21382 case 'H':
21383 /* If constant, output low-order six bits. Otherwise, write
21384 normally. */
21385 if (INT_P (x))
21386 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21387 else
21388 print_operand (file, x, 0);
21389 return;
21391 case 'I':
21392 /* Print `i' if this is a constant, else nothing. */
21393 if (INT_P (x))
21394 putc ('i', file);
21395 return;
21397 case 'j':
21398 /* Write the bit number in CCR for jump. */
21399 i = ccr_bit (x, 0);
21400 if (i == -1)
21401 output_operand_lossage ("invalid %%j code");
21402 else
21403 fprintf (file, "%d", i);
21404 return;
21406 case 'J':
21407 /* Similar, but add one for shift count in rlinm for scc and pass
21408 scc flag to `ccr_bit'. */
21409 i = ccr_bit (x, 1);
21410 if (i == -1)
21411 output_operand_lossage ("invalid %%J code");
21412 else
21413 /* If we want bit 31, write a shift count of zero, not 32. */
21414 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21415 return;
21417 case 'k':
21418 /* X must be a constant. Write the 1's complement of the
21419 constant. */
21420 if (! INT_P (x))
21421 output_operand_lossage ("invalid %%k value");
21422 else
21423 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21424 return;
21426 case 'K':
21427 /* X must be a symbolic constant on ELF. Write an
21428 expression suitable for an 'addi' that adds in the low 16
21429 bits of the MEM. */
21430 if (GET_CODE (x) == CONST)
21432 if (GET_CODE (XEXP (x, 0)) != PLUS
21433 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21434 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21435 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21436 output_operand_lossage ("invalid %%K value");
21438 print_operand_address (file, x);
21439 fputs ("@l", file);
21440 return;
21442 /* %l is output_asm_label. */
21444 case 'L':
21445 /* Write second word of DImode or DFmode reference. Works on register
21446 or non-indexed memory only. */
21447 if (REG_P (x))
21448 fputs (reg_names[REGNO (x) + 1], file);
21449 else if (MEM_P (x))
21451 machine_mode mode = GET_MODE (x);
21452 /* Handle possible auto-increment. Since it is pre-increment and
21453 we have already done it, we can just use an offset of word. */
21454 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21455 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21456 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21457 UNITS_PER_WORD));
21458 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21459 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21460 UNITS_PER_WORD));
21461 else
21462 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21463 UNITS_PER_WORD),
21464 0));
21466 if (small_data_operand (x, GET_MODE (x)))
21467 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21468 reg_names[SMALL_DATA_REG]);
21470 return;
21472 case 'N': /* Unused */
21473 /* Write the number of elements in the vector times 4. */
21474 if (GET_CODE (x) != PARALLEL)
21475 output_operand_lossage ("invalid %%N value");
21476 else
21477 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21478 return;
21480 case 'O': /* Unused */
21481 /* Similar, but subtract 1 first. */
21482 if (GET_CODE (x) != PARALLEL)
21483 output_operand_lossage ("invalid %%O value");
21484 else
21485 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21486 return;
21488 case 'p':
21489 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21490 if (! INT_P (x)
21491 || INTVAL (x) < 0
21492 || (i = exact_log2 (INTVAL (x))) < 0)
21493 output_operand_lossage ("invalid %%p value");
21494 else
21495 fprintf (file, "%d", i);
21496 return;
21498 case 'P':
21499 /* The operand must be an indirect memory reference. The result
21500 is the register name. */
21501 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21502 || REGNO (XEXP (x, 0)) >= 32)
21503 output_operand_lossage ("invalid %%P value");
21504 else
21505 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21506 return;
21508 case 'q':
21509 /* This outputs the logical code corresponding to a boolean
21510 expression. The expression may have one or both operands
21511 negated (if one, only the first one). For condition register
21512 logical operations, it will also treat the negated
21513 CR codes as NOTs, but not handle NOTs of them. */
21515 const char *const *t = 0;
21516 const char *s;
21517 enum rtx_code code = GET_CODE (x);
21518 static const char * const tbl[3][3] = {
21519 { "and", "andc", "nor" },
21520 { "or", "orc", "nand" },
21521 { "xor", "eqv", "xor" } };
21523 if (code == AND)
21524 t = tbl[0];
21525 else if (code == IOR)
21526 t = tbl[1];
21527 else if (code == XOR)
21528 t = tbl[2];
21529 else
21530 output_operand_lossage ("invalid %%q value");
21532 if (GET_CODE (XEXP (x, 0)) != NOT)
21533 s = t[0];
21534 else
21536 if (GET_CODE (XEXP (x, 1)) == NOT)
21537 s = t[2];
21538 else
21539 s = t[1];
21542 fputs (s, file);
21544 return;
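/* Illustrative examples for the table above (assumed RTL operands):
   (and a b)             -> "and"
   (and (not a) b)       -> "andc"
   (and (not a) (not b)) -> "nor"
   (xor (not a) b)       -> "eqv"
   (xor (not a) (not b)) -> "xor"   since ~a ^ ~b == a ^ b.  */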
21546 case 'Q':
21547 if (! TARGET_MFCRF)
21548 return;
21549 fputc (',', file);
21550 /* FALLTHRU */
21552 case 'R':
21553 /* X is a CR register. Print the mask for `mtcrf'. */
21554 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21555 output_operand_lossage ("invalid %%R value");
21556 else
21557 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21558 return;
21560 case 's':
21561 /* Low 5 bits of 32 - value */
21562 if (! INT_P (x))
21563 output_operand_lossage ("invalid %%s value");
21564 else
21565 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21566 return;
21568 case 't':
21569 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21570 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21572 /* Bit 3 is OV bit. */
21573 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21575 /* If we want bit 31, write a shift count of zero, not 32. */
21576 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21577 return;
21579 case 'T':
21580 /* Print the symbolic name of a branch target register. */
21581 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21582 && REGNO (x) != CTR_REGNO))
21583 output_operand_lossage ("invalid %%T value");
21584 else if (REGNO (x) == LR_REGNO)
21585 fputs ("lr", file);
21586 else
21587 fputs ("ctr", file);
21588 return;
21590 case 'u':
21591 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21592 for use in unsigned operand. */
21593 if (! INT_P (x))
21595 output_operand_lossage ("invalid %%u value");
21596 return;
21599 uval = INTVAL (x);
21600 if ((uval & 0xffff) == 0)
21601 uval >>= 16;
21603 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21604 return;
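/* For example (assumed constants): 0x5678 prints as "0x5678", while
   0x12340000 has a zero low half, so the value is shifted down and
   "0x1234" is printed instead.  */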
21606 case 'v':
21607 /* High-order 16 bits of constant for use in signed operand. */
21608 if (! INT_P (x))
21609 output_operand_lossage ("invalid %%v value");
21610 else
21611 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21612 (INTVAL (x) >> 16) & 0xffff);
21613 return;
21615 case 'U':
21616 /* Print `u' if this has an auto-increment or auto-decrement. */
21617 if (MEM_P (x)
21618 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21619 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21620 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21621 putc ('u', file);
21622 return;
21624 case 'V':
21625 /* Print the trap code for this operand. */
21626 switch (GET_CODE (x))
21628 case EQ:
21629 fputs ("eq", file); /* 4 */
21630 break;
21631 case NE:
21632 fputs ("ne", file); /* 24 */
21633 break;
21634 case LT:
21635 fputs ("lt", file); /* 16 */
21636 break;
21637 case LE:
21638 fputs ("le", file); /* 20 */
21639 break;
21640 case GT:
21641 fputs ("gt", file); /* 8 */
21642 break;
21643 case GE:
21644 fputs ("ge", file); /* 12 */
21645 break;
21646 case LTU:
21647 fputs ("llt", file); /* 2 */
21648 break;
21649 case LEU:
21650 fputs ("lle", file); /* 6 */
21651 break;
21652 case GTU:
21653 fputs ("lgt", file); /* 1 */
21654 break;
21655 case GEU:
21656 fputs ("lge", file); /* 5 */
21657 break;
21658 default:
21659 gcc_unreachable ();
21661 break;
21663 case 'w':
21664 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21665 normally. */
21666 if (INT_P (x))
21667 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21668 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21669 else
21670 print_operand (file, x, 0);
21671 return;
21673 case 'x':
21674 /* X is a FPR or Altivec register used in a VSX context. */
21675 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21676 output_operand_lossage ("invalid %%x value");
21677 else
21679 int reg = REGNO (x);
21680 int vsx_reg = (FP_REGNO_P (reg)
21681 ? reg - 32
21682 : reg - FIRST_ALTIVEC_REGNO + 32);
21684 #ifdef TARGET_REGNAMES
21685 if (TARGET_REGNAMES)
21686 fprintf (file, "%%vs%d", vsx_reg);
21687 else
21688 #endif
21689 fprintf (file, "%d", vsx_reg);
21691 return;
21693 case 'X':
21694 if (MEM_P (x)
21695 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21696 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21697 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21698 putc ('x', file);
21699 return;
21701 case 'Y':
21702 /* Like 'L', for third word of TImode/PTImode */
21703 if (REG_P (x))
21704 fputs (reg_names[REGNO (x) + 2], file);
21705 else if (MEM_P (x))
21707 machine_mode mode = GET_MODE (x);
21708 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21709 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21710 output_address (mode, plus_constant (Pmode,
21711 XEXP (XEXP (x, 0), 0), 8));
21712 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21713 output_address (mode, plus_constant (Pmode,
21714 XEXP (XEXP (x, 0), 0), 8));
21715 else
21716 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21717 if (small_data_operand (x, GET_MODE (x)))
21718 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21719 reg_names[SMALL_DATA_REG]);
21721 return;
21723 case 'z':
21724 /* X is a SYMBOL_REF. Write out the name preceded by a
21725 period and without any trailing data in brackets. Used for function
21726 names. If we are configured for System V (or the embedded ABI) on
21727 the PowerPC, do not emit the period, since those systems do not use
21728 TOCs and the like. */
21729 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21731 /* For macho, check to see if we need a stub. */
21732 if (TARGET_MACHO)
21734 const char *name = XSTR (x, 0);
21735 #if TARGET_MACHO
21736 if (darwin_emit_branch_islands
21737 && MACHOPIC_INDIRECT
21738 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21739 name = machopic_indirection_name (x, /*stub_p=*/true);
21740 #endif
21741 assemble_name (file, name);
21743 else if (!DOT_SYMBOLS)
21744 assemble_name (file, XSTR (x, 0));
21745 else
21746 rs6000_output_function_entry (file, XSTR (x, 0));
21747 return;
21749 case 'Z':
21750 /* Like 'L', for last word of TImode/PTImode. */
21751 if (REG_P (x))
21752 fputs (reg_names[REGNO (x) + 3], file);
21753 else if (MEM_P (x))
21755 machine_mode mode = GET_MODE (x);
21756 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21757 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21758 output_address (mode, plus_constant (Pmode,
21759 XEXP (XEXP (x, 0), 0), 12));
21760 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21761 output_address (mode, plus_constant (Pmode,
21762 XEXP (XEXP (x, 0), 0), 12));
21763 else
21764 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21765 if (small_data_operand (x, GET_MODE (x)))
21766 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21767 reg_names[SMALL_DATA_REG]);
21769 return;
21771 /* Print AltiVec memory operand. */
21772 case 'y':
21774 rtx tmp;
21776 gcc_assert (MEM_P (x));
21778 tmp = XEXP (x, 0);
21780 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21781 && GET_CODE (tmp) == AND
21782 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21783 && INTVAL (XEXP (tmp, 1)) == -16)
21784 tmp = XEXP (tmp, 0);
21785 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21786 && GET_CODE (tmp) == PRE_MODIFY)
21787 tmp = XEXP (tmp, 1);
21788 if (REG_P (tmp))
21789 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21790 else
21792 if (GET_CODE (tmp) != PLUS
21793 || !REG_P (XEXP (tmp, 0))
21794 || !REG_P (XEXP (tmp, 1)))
21796 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21797 break;
21800 if (REGNO (XEXP (tmp, 0)) == 0)
21801 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21802 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21803 else
21804 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21805 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21807 break;
21810 case 0:
21811 if (REG_P (x))
21812 fprintf (file, "%s", reg_names[REGNO (x)]);
21813 else if (MEM_P (x))
21815 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21816 know the width from the mode. */
21817 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21818 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21819 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21820 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21821 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21822 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21823 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21824 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21825 else
21826 output_address (GET_MODE (x), XEXP (x, 0));
21828 else
21830 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21831 /* This hack along with a corresponding hack in
21832 rs6000_output_addr_const_extra arranges to output addends
21833 where the assembler expects to find them. eg.
21834 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21835 without this hack would be output as "x@toc+4". We
21836 want "x+4@toc". */
21837 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21838 else
21839 output_addr_const (file, x);
21841 return;
21843 case '&':
21844 if (const char *name = get_some_local_dynamic_name ())
21845 assemble_name (file, name);
21846 else
21847 output_operand_lossage ("'%%&' used without any "
21848 "local dynamic TLS references");
21849 return;
21851 default:
21852 output_operand_lossage ("invalid %%xn code");
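/* A sketch of the default (code 0) output, assuming the default
   numeric register names: a REG for r9 prints "9"; a PRE_INC MEM of
   SImode on r9 prints "4(9)", the mode size supplying the
   displacement; other MEMs go through output_address and constants
   through output_addr_const.  */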
21856 /* Print the address of an operand. */
21858 void
21859 print_operand_address (FILE *file, rtx x)
21861 if (REG_P (x))
21862 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21863 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21864 || GET_CODE (x) == LABEL_REF)
21866 output_addr_const (file, x);
21867 if (small_data_operand (x, GET_MODE (x)))
21868 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21869 reg_names[SMALL_DATA_REG]);
21870 else
21871 gcc_assert (!TARGET_TOC);
21873 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21874 && REG_P (XEXP (x, 1)))
21876 if (REGNO (XEXP (x, 0)) == 0)
21877 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21878 reg_names[ REGNO (XEXP (x, 0)) ]);
21879 else
21880 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21881 reg_names[ REGNO (XEXP (x, 1)) ]);
21883 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21884 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21885 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21886 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21887 #if TARGET_MACHO
21888 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21889 && CONSTANT_P (XEXP (x, 1)))
21891 fprintf (file, "lo16(");
21892 output_addr_const (file, XEXP (x, 1));
21893 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21895 #endif
21896 #if TARGET_ELF
21897 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21898 && CONSTANT_P (XEXP (x, 1)))
21900 output_addr_const (file, XEXP (x, 1));
21901 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21903 #endif
21904 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21906 /* This hack along with a corresponding hack in
21907 rs6000_output_addr_const_extra arranges to output addends
21908 where the assembler expects to find them. eg.
21909 (lo_sum (reg 9)
21910 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21911 without this hack would be output as "x@toc+8@l(9)". We
21912 want "x+8@toc@l(9)". */
21913 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21914 if (GET_CODE (x) == LO_SUM)
21915 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21916 else
21917 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21919 else
21920 gcc_unreachable ();
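/* Illustrative address forms (assuming default register names):
   (reg 9)                   -> "0(9)"
   (plus (reg 9) (const 16)) -> "16(9)"
   (plus (reg 3) (reg 4))    -> "3,4"  (swapped if the first register
				       is r0, which reads as zero in
				       the RA slot)
   (lo_sum (reg 9) sym)      -> "sym@l(9)" on ELF,
				"lo16(sym)(9)" on Mach-O.  */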
21923 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21925 static bool
21926 rs6000_output_addr_const_extra (FILE *file, rtx x)
21928 if (GET_CODE (x) == UNSPEC)
21929 switch (XINT (x, 1))
21931 case UNSPEC_TOCREL:
21932 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21933 && REG_P (XVECEXP (x, 0, 1))
21934 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21935 output_addr_const (file, XVECEXP (x, 0, 0));
21936 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21938 if (INTVAL (tocrel_offset_oac) >= 0)
21939 fprintf (file, "+");
21940 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21942 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21944 putc ('-', file);
21945 assemble_name (file, toc_label_name);
21946 need_toc_init = 1;
21948 else if (TARGET_ELF)
21949 fputs ("@toc", file);
21950 return true;
21952 #if TARGET_MACHO
21953 case UNSPEC_MACHOPIC_OFFSET:
21954 output_addr_const (file, XVECEXP (x, 0, 0));
21955 putc ('-', file);
21956 machopic_output_function_base_name (file);
21957 return true;
21958 #endif
21960 return false;
21963 /* Target hook for assembling integer objects. The PowerPC version has
21964 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21965 is defined. It also needs to handle DI-mode objects on 64-bit
21966 targets. */
21968 static bool
21969 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21971 #ifdef RELOCATABLE_NEEDS_FIXUP
21972 /* Special handling for SI values. */
21973 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21975 static int recurse = 0;
21977 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21978 the .fixup section. Since the TOC section is already relocated, we
21979 don't need to mark it here. We used to skip the text section, but it
21980 should never be valid for relocated addresses to be placed in the text
21981 section. */
21982 if (DEFAULT_ABI == ABI_V4
21983 && (TARGET_RELOCATABLE || flag_pic > 1)
21984 && in_section != toc_section
21985 && !recurse
21986 && !CONST_SCALAR_INT_P (x)
21987 && CONSTANT_P (x))
21989 char buf[256];
21991 recurse = 1;
21992 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21993 fixuplabelno++;
21994 ASM_OUTPUT_LABEL (asm_out_file, buf);
21995 fprintf (asm_out_file, "\t.long\t(");
21996 output_addr_const (asm_out_file, x);
21997 fprintf (asm_out_file, ")@fixup\n");
21998 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21999 ASM_OUTPUT_ALIGN (asm_out_file, 2);
22000 fprintf (asm_out_file, "\t.long\t");
22001 assemble_name (asm_out_file, buf);
22002 fprintf (asm_out_file, "\n\t.previous\n");
22003 recurse = 0;
22004 return true;
22006 /* Remove initial .'s to turn a -mcall-aixdesc function
22007 address into the address of the descriptor, not the function
22008 itself. */
22009 else if (GET_CODE (x) == SYMBOL_REF
22010 && XSTR (x, 0)[0] == '.'
22011 && DEFAULT_ABI == ABI_AIX)
22013 const char *name = XSTR (x, 0);
22014 while (*name == '.')
22015 name++;
22017 fprintf (asm_out_file, "\t.long\t%s\n", name);
22018 return true;
22021 #endif /* RELOCATABLE_NEEDS_FIXUP */
22022 return default_assemble_integer (x, size, aligned_p);
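/* For -mrelocatable the code above emits, for a relocatable address X,
   assembly along these lines (the label number is assumed):

       .LCP0:
	       .long (X)@fixup
	       .section ".fixup","aw"
	       .align 2
	       .long .LCP0
	       .previous						*/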
22025 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
22026 /* Emit an assembler directive to set symbol visibility for DECL to
22027 VISIBILITY_TYPE. */
22029 static void
22030 rs6000_assemble_visibility (tree decl, int vis)
22032 if (TARGET_XCOFF)
22033 return;
22035 /* Functions need to have their entry point symbol visibility set as
22036 well as their descriptor symbol visibility. */
22037 if (DEFAULT_ABI == ABI_AIX
22038 && DOT_SYMBOLS
22039 && TREE_CODE (decl) == FUNCTION_DECL)
22041 static const char * const visibility_types[] = {
22042 NULL, "protected", "hidden", "internal"
22045 const char *name, *type;
22047 name = ((* targetm.strip_name_encoding)
22048 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
22049 type = visibility_types[vis];
22051 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
22052 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
22054 else
22055 default_assemble_visibility (decl, vis);
22057 #endif
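/* For instance, a hidden FUNCTION_DECL "foo" under the AIX ABI with
   dot symbols gets two directives, one for the descriptor and one for
   the entry point:

       .hidden foo
       .hidden .foo							*/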
22059 enum rtx_code
22060 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
22062 /* Reversing an FP compare needs care -- an ordered compare
22063 becomes an unordered compare and vice versa. */
22064 if (mode == CCFPmode
22065 && (!flag_finite_math_only
22066 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
22067 || code == UNEQ || code == LTGT))
22068 return reverse_condition_maybe_unordered (code);
22069 else
22070 return reverse_condition (code);
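/* Example: in CCFPmode, GE reverses to UNLT so that NaN operands still
   take the reversed branch correctly; with flag_finite_math_only (and
   no explicitly unordered code) GE simply reverses to LT.  */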
22073 /* Generate a compare for CODE. Return a brand-new rtx that
22074 represents the result of the compare. */
22076 static rtx
22077 rs6000_generate_compare (rtx cmp, machine_mode mode)
22079 machine_mode comp_mode;
22080 rtx compare_result;
22081 enum rtx_code code = GET_CODE (cmp);
22082 rtx op0 = XEXP (cmp, 0);
22083 rtx op1 = XEXP (cmp, 1);
22085 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22086 comp_mode = CCmode;
22087 else if (FLOAT_MODE_P (mode))
22088 comp_mode = CCFPmode;
22089 else if (code == GTU || code == LTU
22090 || code == GEU || code == LEU)
22091 comp_mode = CCUNSmode;
22092 else if ((code == EQ || code == NE)
22093 && unsigned_reg_p (op0)
22094 && (unsigned_reg_p (op1)
22095 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
22096 /* These are unsigned values; perhaps there will be a later
22097 ordering compare that can be shared with this one. */
22098 comp_mode = CCUNSmode;
22099 else
22100 comp_mode = CCmode;
22102 /* If we have an unsigned compare, make sure we don't have a signed value as
22103 an immediate. */
22104 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
22105 && INTVAL (op1) < 0)
22107 op0 = copy_rtx_if_shared (op0);
22108 op1 = force_reg (GET_MODE (op0), op1);
22109 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
22112 /* First, the compare. */
22113 compare_result = gen_reg_rtx (comp_mode);
22115 /* IEEE 128-bit support in VSX registers when we do not have hardware
22116 support. */
22117 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22119 rtx libfunc = NULL_RTX;
22120 bool check_nan = false;
22121 rtx dest;
22123 switch (code)
22125 case EQ:
22126 case NE:
22127 libfunc = optab_libfunc (eq_optab, mode);
22128 break;
22130 case GT:
22131 case GE:
22132 libfunc = optab_libfunc (ge_optab, mode);
22133 break;
22135 case LT:
22136 case LE:
22137 libfunc = optab_libfunc (le_optab, mode);
22138 break;
22140 case UNORDERED:
22141 case ORDERED:
22142 libfunc = optab_libfunc (unord_optab, mode);
22143 code = (code == UNORDERED) ? NE : EQ;
22144 break;
22146 case UNGE:
22147 case UNGT:
22148 check_nan = true;
22149 libfunc = optab_libfunc (ge_optab, mode);
22150 code = (code == UNGE) ? GE : GT;
22151 break;
22153 case UNLE:
22154 case UNLT:
22155 check_nan = true;
22156 libfunc = optab_libfunc (le_optab, mode);
22157 code = (code == UNLE) ? LE : LT;
22158 break;
22160 case UNEQ:
22161 case LTGT:
22162 check_nan = true;
22163 libfunc = optab_libfunc (eq_optab, mode);
22164 code = (code == UNEQ) ? EQ : NE;
22165 break;
22167 default:
22168 gcc_unreachable ();
22171 gcc_assert (libfunc);
22173 if (!check_nan)
22174 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22175 SImode, op0, mode, op1, mode);
22177 /* The library signals an exception for signalling NaNs, so we need to
22178 handle isgreater, etc. by first checking isordered. */
22179 else
22181 rtx ne_rtx, normal_dest, unord_dest;
22182 rtx unord_func = optab_libfunc (unord_optab, mode);
22183 rtx join_label = gen_label_rtx ();
22184 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22185 rtx unord_cmp = gen_reg_rtx (comp_mode);
22188 /* Test for either value being a NaN. */
22189 gcc_assert (unord_func);
22190 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22191 SImode, op0, mode, op1, mode);
22193 /* Set the value to 1 (true) if either input is a NaN, and jump to
22194 the join label; the final NE test against zero then yields true. */
22195 dest = gen_reg_rtx (SImode);
22196 emit_move_insn (dest, const1_rtx);
22197 emit_insn (gen_rtx_SET (unord_cmp,
22198 gen_rtx_COMPARE (comp_mode, unord_dest,
22199 const0_rtx)));
22201 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22202 emit_jump_insn (gen_rtx_SET (pc_rtx,
22203 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22204 join_ref,
22205 pc_rtx)));
22207 /* Do the normal comparison, knowing that the values are not
22208 NaNs. */
22209 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22210 SImode, op0, mode, op1, mode);
22212 emit_insn (gen_cstoresi4 (dest,
22213 gen_rtx_fmt_ee (code, SImode, normal_dest,
22214 const0_rtx),
22215 normal_dest, const0_rtx));
22217 /* Join the NaN and non-NaN paths. Compare dest against 0. */
22218 emit_label (join_label);
22219 code = NE;
22222 emit_insn (gen_rtx_SET (compare_result,
22223 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22226 else
22228 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22229 CLOBBERs to match cmptf_internal2 pattern. */
22230 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22231 && FLOAT128_IBM_P (GET_MODE (op0))
22232 && TARGET_HARD_FLOAT)
22233 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22234 gen_rtvec (10,
22235 gen_rtx_SET (compare_result,
22236 gen_rtx_COMPARE (comp_mode, op0, op1)),
22237 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22238 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22239 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22240 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22241 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22242 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22243 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22244 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22245 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22246 else if (GET_CODE (op1) == UNSPEC
22247 && XINT (op1, 1) == UNSPEC_SP_TEST)
22249 rtx op1b = XVECEXP (op1, 0, 0);
22250 comp_mode = CCEQmode;
22251 compare_result = gen_reg_rtx (CCEQmode);
22252 if (TARGET_64BIT)
22253 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22254 else
22255 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22257 else
22258 emit_insn (gen_rtx_SET (compare_result,
22259 gen_rtx_COMPARE (comp_mode, op0, op1)));
22262 /* Some kinds of FP comparisons need an OR operation;
22263 under flag_finite_math_only we don't bother. */
22264 if (FLOAT_MODE_P (mode)
22265 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22266 && !flag_finite_math_only
22267 && (code == LE || code == GE
22268 || code == UNEQ || code == LTGT
22269 || code == UNGT || code == UNLT))
22271 enum rtx_code or1, or2;
22272 rtx or1_rtx, or2_rtx, compare2_rtx;
22273 rtx or_result = gen_reg_rtx (CCEQmode);
22275 switch (code)
22277 case LE: or1 = LT; or2 = EQ; break;
22278 case GE: or1 = GT; or2 = EQ; break;
22279 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22280 case LTGT: or1 = LT; or2 = GT; break;
22281 case UNGT: or1 = UNORDERED; or2 = GT; break;
22282 case UNLT: or1 = UNORDERED; or2 = LT; break;
22283 default: gcc_unreachable ();
22285 validate_condition_mode (or1, comp_mode);
22286 validate_condition_mode (or2, comp_mode);
22287 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22288 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22289 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22290 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22291 const_true_rtx);
22292 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22294 compare_result = or_result;
22295 code = EQ;
22298 validate_condition_mode (code, GET_MODE (compare_result));
22300 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
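/* Sketch of the common FP case above: "a <= b" is compiled as a
   CCFPmode compare followed by a CR logical op that ORs the LT and EQ
   bits into a CCEQmode register (a cror in the generated assembly),
   and the condition the caller finally tests is EQ against that
   result.  */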
22304 /* Return the diagnostic message string if the binary operation OP is
22305 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22307 static const char*
22308 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22309 const_tree type1,
22310 const_tree type2)
22312 machine_mode mode1 = TYPE_MODE (type1);
22313 machine_mode mode2 = TYPE_MODE (type2);
22315 /* For complex modes, use the inner type. */
22316 if (COMPLEX_MODE_P (mode1))
22317 mode1 = GET_MODE_INNER (mode1);
22319 if (COMPLEX_MODE_P (mode2))
22320 mode2 = GET_MODE_INNER (mode2);
22322 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22323 double to intermix unless -mfloat128-convert. */
22324 if (mode1 == mode2)
22325 return NULL;
22327 if (!TARGET_FLOAT128_CVT)
22329 if ((mode1 == KFmode && mode2 == IFmode)
22330 || (mode1 == IFmode && mode2 == KFmode))
22331 return N_("__float128 and __ibm128 cannot be used in the same "
22332 "expression");
22334 if (TARGET_IEEEQUAD
22335 && ((mode1 == IFmode && mode2 == TFmode)
22336 || (mode1 == TFmode && mode2 == IFmode)))
22337 return N_("__ibm128 and long double cannot be used in the same "
22338 "expression");
22340 if (!TARGET_IEEEQUAD
22341 && ((mode1 == KFmode && mode2 == TFmode)
22342 || (mode1 == TFmode && mode2 == KFmode)))
22343 return N_("__float128 and long double cannot be used in the same "
22344 "expression");
22347 return NULL;
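/* For example, without -mfloat128-convert the following source is
   rejected (a user-level sketch with assumed variable names):

       __float128 a;
       __ibm128 b;
       ... a + b ...   -> error: __float128 and __ibm128 cannot be
			  used in the same expression			*/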
22351 /* Expand floating point conversion to/from __float128 and __ibm128. */
22353 void
22354 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22356 machine_mode dest_mode = GET_MODE (dest);
22357 machine_mode src_mode = GET_MODE (src);
22358 convert_optab cvt = unknown_optab;
22359 bool do_move = false;
22360 rtx libfunc = NULL_RTX;
22361 rtx dest2;
22362 typedef rtx (*rtx_2func_t) (rtx, rtx);
22363 rtx_2func_t hw_convert = (rtx_2func_t)0;
22364 size_t kf_or_tf;
22366 struct hw_conv_t {
22367 rtx_2func_t from_df;
22368 rtx_2func_t from_sf;
22369 rtx_2func_t from_si_sign;
22370 rtx_2func_t from_si_uns;
22371 rtx_2func_t from_di_sign;
22372 rtx_2func_t from_di_uns;
22373 rtx_2func_t to_df;
22374 rtx_2func_t to_sf;
22375 rtx_2func_t to_si_sign;
22376 rtx_2func_t to_si_uns;
22377 rtx_2func_t to_di_sign;
22378 rtx_2func_t to_di_uns;
22379 } hw_conversions[2] = {
22380 /* conversions to/from KFmode */
22382 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22383 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22384 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22385 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22386 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22387 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22388 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22389 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22390 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22391 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22392 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22393 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22396 /* conversions to/from TFmode */
22398 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22399 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22400 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22401 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22402 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22403 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22404 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22405 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22406 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22407 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22408 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22409 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22413 if (dest_mode == src_mode)
22414 gcc_unreachable ();
22416 /* Eliminate memory operations. */
22417 if (MEM_P (src))
22418 src = force_reg (src_mode, src);
22420 if (MEM_P (dest))
22422 rtx tmp = gen_reg_rtx (dest_mode);
22423 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22424 rs6000_emit_move (dest, tmp, dest_mode);
22425 return;
22428 /* Convert to IEEE 128-bit floating point. */
22429 if (FLOAT128_IEEE_P (dest_mode))
22431 if (dest_mode == KFmode)
22432 kf_or_tf = 0;
22433 else if (dest_mode == TFmode)
22434 kf_or_tf = 1;
22435 else
22436 gcc_unreachable ();
22438 switch (src_mode)
22440 case E_DFmode:
22441 cvt = sext_optab;
22442 hw_convert = hw_conversions[kf_or_tf].from_df;
22443 break;
22445 case E_SFmode:
22446 cvt = sext_optab;
22447 hw_convert = hw_conversions[kf_or_tf].from_sf;
22448 break;
22450 case E_KFmode:
22451 case E_IFmode:
22452 case E_TFmode:
22453 if (FLOAT128_IBM_P (src_mode))
22454 cvt = sext_optab;
22455 else
22456 do_move = true;
22457 break;
22459 case E_SImode:
22460 if (unsigned_p)
22462 cvt = ufloat_optab;
22463 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22465 else
22467 cvt = sfloat_optab;
22468 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22470 break;
22472 case E_DImode:
22473 if (unsigned_p)
22475 cvt = ufloat_optab;
22476 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22478 else
22480 cvt = sfloat_optab;
22481 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22483 break;
22485 default:
22486 gcc_unreachable ();
22490 /* Convert from IEEE 128-bit floating point. */
22491 else if (FLOAT128_IEEE_P (src_mode))
22493 if (src_mode == KFmode)
22494 kf_or_tf = 0;
22495 else if (src_mode == TFmode)
22496 kf_or_tf = 1;
22497 else
22498 gcc_unreachable ();
22500 switch (dest_mode)
22502 case E_DFmode:
22503 cvt = trunc_optab;
22504 hw_convert = hw_conversions[kf_or_tf].to_df;
22505 break;
22507 case E_SFmode:
22508 cvt = trunc_optab;
22509 hw_convert = hw_conversions[kf_or_tf].to_sf;
22510 break;
22512 case E_KFmode:
22513 case E_IFmode:
22514 case E_TFmode:
22515 if (FLOAT128_IBM_P (dest_mode))
22516 cvt = trunc_optab;
22517 else
22518 do_move = true;
22519 break;
22521 case E_SImode:
22522 if (unsigned_p)
22524 cvt = ufix_optab;
22525 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22527 else
22529 cvt = sfix_optab;
22530 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22532 break;
22534 case E_DImode:
22535 if (unsigned_p)
22537 cvt = ufix_optab;
22538 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22540 else
22542 cvt = sfix_optab;
22543 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22545 break;
22547 default:
22548 gcc_unreachable ();
22552 /* Both IBM format. */
22553 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22554 do_move = true;
22556 else
22557 gcc_unreachable ();
22559 /* Handle conversion between TFmode/KFmode. */
22560 if (do_move)
22561 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22563 /* Handle conversion if we have hardware support. */
22564 else if (TARGET_FLOAT128_HW && hw_convert)
22565 emit_insn ((hw_convert) (dest, src));
22567 /* Call an external function to do the conversion. */
22568 else if (cvt != unknown_optab)
22570 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22571 gcc_assert (libfunc != NULL_RTX);
22573 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22574 src, src_mode);
22576 gcc_assert (dest2 != NULL_RTX);
22577 if (!rtx_equal_p (dest, dest2))
22578 emit_move_insn (dest, dest2);
22581 else
22582 gcc_unreachable ();
22584 return;
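/* For instance (assuming ISA 3.0 hardware support), a DFmode-to-KFmode
   conversion takes the hw_conversions[0].from_df entry and emits the
   gen_extenddfkf2_hw pattern; without hardware support the same
   conversion falls back to the corresponding sext_optab libcall.  */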
22588 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22589 can be used as that dest register. Return the dest register. */
22592 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22594 if (op2 == const0_rtx)
22595 return op1;
22597 if (GET_CODE (scratch) == SCRATCH)
22598 scratch = gen_reg_rtx (mode);
22600 if (logical_operand (op2, mode))
22601 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22602 else
22603 emit_insn (gen_rtx_SET (scratch,
22604 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22606 return scratch;
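/* Sketch: comparing r3 against r4 emits "xor scratch,r3,r4" when OP2
   is a logical_operand (a register or a suitable constant); otherwise
   scratch = r3 + (-OP2) is used.  Either way SCRATCH is zero exactly
   when OP1 == OP2, ready for the caller's compare.  */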
22609 void
22610 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22612 rtx condition_rtx;
22613 machine_mode op_mode;
22614 enum rtx_code cond_code;
22615 rtx result = operands[0];
22617 condition_rtx = rs6000_generate_compare (operands[1], mode);
22618 cond_code = GET_CODE (condition_rtx);
22620 if (cond_code == NE
22621 || cond_code == GE || cond_code == LE
22622 || cond_code == GEU || cond_code == LEU
22623 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22625 rtx not_result = gen_reg_rtx (CCEQmode);
22626 rtx not_op, rev_cond_rtx;
22627 machine_mode cc_mode;
22629 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22631 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22632 SImode, XEXP (condition_rtx, 0), const0_rtx);
22633 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22634 emit_insn (gen_rtx_SET (not_result, not_op));
22635 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22638 op_mode = GET_MODE (XEXP (operands[1], 0));
22639 if (op_mode == VOIDmode)
22640 op_mode = GET_MODE (XEXP (operands[1], 1));
22642 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22644 PUT_MODE (condition_rtx, DImode);
22645 convert_move (result, condition_rtx, 0);
22647 else
22649 PUT_MODE (condition_rtx, SImode);
22650 emit_insn (gen_rtx_SET (result, condition_rtx));
22654 /* Emit a conditional branch: test the comparison in OPERANDS[0]
22655 and branch to the label in OPERANDS[3]. */
22656 void
22657 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22659 rtx condition_rtx, loc_ref;
22661 condition_rtx = rs6000_generate_compare (operands[0], mode);
22662 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22663 emit_jump_insn (gen_rtx_SET (pc_rtx,
22664 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22665 loc_ref, pc_rtx)));
22668 /* Return the string to output a conditional branch to LABEL, which is
22669 the operand template of the label, or NULL if the branch is really a
22670 conditional return.
22672 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22673 condition code register and its mode specifies what kind of
22674 comparison we made.
22676 REVERSED is nonzero if we should reverse the sense of the comparison.
22678 INSN is the insn. */
22680 char *
22681 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22683 static char string[64];
22684 enum rtx_code code = GET_CODE (op);
22685 rtx cc_reg = XEXP (op, 0);
22686 machine_mode mode = GET_MODE (cc_reg);
22687 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22688 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22689 int really_reversed = reversed ^ need_longbranch;
22690 char *s = string;
22691 const char *ccode;
22692 const char *pred;
22693 rtx note;
22695 validate_condition_mode (code, mode);
22697 /* Work out which way this really branches. We could use
22698 reverse_condition_maybe_unordered here always but this
22699 makes the resulting assembler clearer. */
22700 if (really_reversed)
22702 /* Reversing an FP compare needs care -- an ordered compare
22703 becomes an unordered compare and vice versa. */
22704 if (mode == CCFPmode)
22705 code = reverse_condition_maybe_unordered (code);
22706 else
22707 code = reverse_condition (code);
22710 switch (code)
22712 /* Not all of these are actually distinct opcodes, but
22713 we distinguish them for clarity of the resulting assembler. */
22714 case NE: case LTGT:
22715 ccode = "ne"; break;
22716 case EQ: case UNEQ:
22717 ccode = "eq"; break;
22718 case GE: case GEU:
22719 ccode = "ge"; break;
22720 case GT: case GTU: case UNGT:
22721 ccode = "gt"; break;
22722 case LE: case LEU:
22723 ccode = "le"; break;
22724 case LT: case LTU: case UNLT:
22725 ccode = "lt"; break;
22726 case UNORDERED: ccode = "un"; break;
22727 case ORDERED: ccode = "nu"; break;
22728 case UNGE: ccode = "nl"; break;
22729 case UNLE: ccode = "ng"; break;
22730 default:
22731 gcc_unreachable ();
22734 /* Maybe we have a guess as to how likely the branch is. */
22735 pred = "";
22736 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22737 if (note != NULL_RTX)
22739 /* PROB is the difference from 50%. */
22740 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22741 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22743 /* Only hint for highly probable/improbable branches on newer cpus when
22744 we have real profile data, as static prediction overrides processor
22745 dynamic prediction. For older cpus we may as well always hint, but
22746 assume not taken for branches that are very close to 50% as a
22747 mispredicted taken branch is more expensive than a
22748 mispredicted not-taken branch. */
22749 if (rs6000_always_hint
22750 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22751 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22752 && br_prob_note_reliable_p (note)))
22754 if (abs (prob) > REG_BR_PROB_BASE / 20
22755 && ((prob > 0) ^ need_longbranch))
22756 pred = "+";
22757 else
22758 pred = "-";
22762 if (label == NULL)
22763 s += sprintf (s, "b%slr%s ", ccode, pred);
22764 else
22765 s += sprintf (s, "b%s%s ", ccode, pred);
22767 /* We need to escape any '%' characters in the reg_names string.
22768 Assume they'd only be the first character.... */
22769 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22770 *s++ = '%';
22771 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22773 if (label != NULL)
22775 /* If the branch distance was too far, we may have to use an
22776 unconditional branch to go the distance. */
22777 if (need_longbranch)
22778 s += sprintf (s, ",$+8\n\tb %s", label);
22779 else
22780 s += sprintf (s, ",%s", label);
22783 return string;
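/* Example return values (assumed label and CR field): "beq+ 0,.L5"
   for a likely-taken EQ on cr0, or, when the target is out of range,
   the reversed long form "bne- 0,$+8" followed by "b .L5".  */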
22786 /* Return insn for VSX or Altivec comparisons. */
22788 static rtx
22789 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22791 rtx mask;
22792 machine_mode mode = GET_MODE (op0);
22794 switch (code)
22796 default:
22797 break;
22799 case GE:
22800 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22801 return NULL_RTX;
22802 /* FALLTHRU */
22804 case EQ:
22805 case GT:
22806 case GTU:
22807 case ORDERED:
22808 case UNORDERED:
22809 case UNEQ:
22810 case LTGT:
22811 mask = gen_reg_rtx (mode);
22812 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22813 return mask;
22816 return NULL_RTX;
22819 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22820 DMODE is expected destination mode. This is a recursive function. */
22822 static rtx
22823 rs6000_emit_vector_compare (enum rtx_code rcode,
22824 rtx op0, rtx op1,
22825 machine_mode dmode)
22827 rtx mask;
22828 bool swap_operands = false;
22829 bool try_again = false;
22831 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22832 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22834 /* See if the comparison works as is. */
22835 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22836 if (mask)
22837 return mask;
22839 switch (rcode)
22841 case LT:
22842 rcode = GT;
22843 swap_operands = true;
22844 try_again = true;
22845 break;
22846 case LTU:
22847 rcode = GTU;
22848 swap_operands = true;
22849 try_again = true;
22850 break;
22851 case NE:
22852 case UNLE:
22853 case UNLT:
22854 case UNGE:
22855 case UNGT:
22856 /* Invert condition and try again.
22857 e.g., A != B becomes ~(A==B). */
22859 enum rtx_code rev_code;
22860 enum insn_code nor_code;
22861 rtx mask2;
22863 rev_code = reverse_condition_maybe_unordered (rcode);
22864 if (rev_code == UNKNOWN)
22865 return NULL_RTX;
22867 nor_code = optab_handler (one_cmpl_optab, dmode);
22868 if (nor_code == CODE_FOR_nothing)
22869 return NULL_RTX;
22871 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22872 if (!mask2)
22873 return NULL_RTX;
22875 mask = gen_reg_rtx (dmode);
22876 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22877 return mask;
22879 break;
22880 case GE:
22881 case GEU:
22882 case LE:
22883 case LEU:
22884 /* Try GT/GTU/LT/LTU OR EQ */
22886 rtx c_rtx, eq_rtx;
22887 enum insn_code ior_code;
22888 enum rtx_code new_code;
22890 switch (rcode)
22892 case GE:
22893 new_code = GT;
22894 break;
22896 case GEU:
22897 new_code = GTU;
22898 break;
22900 case LE:
22901 new_code = LT;
22902 break;
22904 case LEU:
22905 new_code = LTU;
22906 break;
22908 default:
22909 gcc_unreachable ();
22912 ior_code = optab_handler (ior_optab, dmode);
22913 if (ior_code == CODE_FOR_nothing)
22914 return NULL_RTX;
22916 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22917 if (!c_rtx)
22918 return NULL_RTX;
22920 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22921 if (!eq_rtx)
22922 return NULL_RTX;
22924 mask = gen_reg_rtx (dmode);
22925 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22926 return mask;
22928 break;
22929 default:
22930 return NULL_RTX;
22933 if (try_again)
22935 if (swap_operands)
22936 std::swap (op0, op1);
22938 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22939 if (mask)
22940 return mask;
22943 /* You only get two chances. */
22944 return NULL_RTX;
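/* Sketch of the rewrites above: LT/LTU swap the operands and retry as
   GT/GTU; NE and the UN* codes compute the reversed compare and invert
   it through one_cmpl (e.g. A != B as ~(A == B), a vnor in practice);
   GE/GEU/LE/LEU fall back to ORing a GT-style compare with an EQ
   compare.  */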
22947 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22948 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22949 operands for the relation operation COND. */
22952 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22953 rtx cond, rtx cc_op0, rtx cc_op1)
22955 machine_mode dest_mode = GET_MODE (dest);
22956 machine_mode mask_mode = GET_MODE (cc_op0);
22957 enum rtx_code rcode = GET_CODE (cond);
22958 machine_mode cc_mode = CCmode;
22959 rtx mask;
22960 rtx cond2;
22961 bool invert_move = false;
22963 if (VECTOR_UNIT_NONE_P (dest_mode))
22964 return 0;
22966 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22967 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22969 switch (rcode)
22971 /* Swap operands if we can, and fall back to doing the operation as
22972 specified, and doing a NOR to invert the test. */
22973 case NE:
22974 case UNLE:
22975 case UNLT:
22976 case UNGE:
22977 case UNGT:
22978 /* Invert condition and try again.
22979 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22980 invert_move = true;
22981 rcode = reverse_condition_maybe_unordered (rcode);
22982 if (rcode == UNKNOWN)
22983 return 0;
22984 break;
22986 case GE:
22987 case LE:
22988 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22990 /* Invert condition to avoid compound test. */
22991 invert_move = true;
22992 rcode = reverse_condition (rcode);
22994 break;
22996 case GTU:
22997 case GEU:
22998 case LTU:
22999 case LEU:
23000 /* Mark unsigned tests with CCUNSmode. */
23001 cc_mode = CCUNSmode;
23003 /* Invert condition to avoid compound test if necessary. */
23004 if (rcode == GEU || rcode == LEU)
23006 invert_move = true;
23007 rcode = reverse_condition (rcode);
23009 break;
23011 default:
23012 break;
23015 /* Get the vector mask for the given relational operations. */
23016 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
23018 if (!mask)
23019 return 0;
23021 if (invert_move)
23022 std::swap (op_true, op_false);
23024 /* The comparison mask is -1/0 in each element, so a select between
constant -1 and 0 vectors can use the mask (or its NOT) directly. */
23025 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
23026 && (GET_CODE (op_true) == CONST_VECTOR
23027 || GET_CODE (op_false) == CONST_VECTOR))
23029 rtx constant_0 = CONST0_RTX (dest_mode);
23030 rtx constant_m1 = CONSTM1_RTX (dest_mode);
23032 if (op_true == constant_m1 && op_false == constant_0)
23034 emit_move_insn (dest, mask);
23035 return 1;
23038 else if (op_true == constant_0 && op_false == constant_m1)
23040 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
23041 return 1;
23044 /* If we can't use the vector comparison directly, perhaps we can use
23045 the mask for the true or false fields, instead of loading up a
23046 constant. */
23047 if (op_true == constant_m1)
23048 op_true = mask;
23050 if (op_false == constant_0)
23051 op_false = mask;
23054 if (!REG_P (op_true) && !SUBREG_P (op_true))
23055 op_true = force_reg (dest_mode, op_true);
23057 if (!REG_P (op_false) && !SUBREG_P (op_false))
23058 op_false = force_reg (dest_mode, op_false);
23060 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
23061 CONST0_RTX (dest_mode));
23062 emit_insn (gen_rtx_SET (dest,
23063 gen_rtx_IF_THEN_ELSE (dest_mode,
23064 cond2,
23065 op_true,
23066 op_false)));
23067 return 1;
23070 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
23071 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
23072 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
23073 0 if the hardware has no such operation. */
23075 static int
23076 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23078 enum rtx_code code = GET_CODE (op);
23079 rtx op0 = XEXP (op, 0);
23080 rtx op1 = XEXP (op, 1);
23081 machine_mode compare_mode = GET_MODE (op0);
23082 machine_mode result_mode = GET_MODE (dest);
23083 bool max_p = false;
23085 if (result_mode != compare_mode)
23086 return 0;
23088 if (code == GE || code == GT)
23089 max_p = true;
23090 else if (code == LE || code == LT)
23091 max_p = false;
23092 else
23093 return 0;
23095 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
23098 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
23099 max_p = !max_p;
23101 else
23102 return 0;
23104 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
23105 return 1;
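/* For example, (a >= b ? a : b) in DFmode maps directly onto SMAX
   (an xsmaxcdp instruction), while (a >= b ? b : a) flips max_p and
   emits SMIN instead.  */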
23108 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
23109 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
23110 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
23111 zero/false. Return 0 if the hardware has no such operation. */
23113 static int
23114 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23116 enum rtx_code code = GET_CODE (op);
23117 rtx op0 = XEXP (op, 0);
23118 rtx op1 = XEXP (op, 1);
23119 machine_mode result_mode = GET_MODE (dest);
23120 rtx compare_rtx;
23121 rtx cmove_rtx;
23122 rtx clobber_rtx;
23124 if (!can_create_pseudo_p ())
23125 return 0;
23127 switch (code)
23129 case EQ:
23130 case GE:
23131 case GT:
23132 break;
23134 case NE:
23135 case LT:
23136 case LE:
23137 code = swap_condition (code);
23138 std::swap (op0, op1);
23139 break;
23141 default:
23142 return 0;
23145 /* Generate: [(parallel [(set (dest)
23146 (if_then_else (op (cmp1) (cmp2))
23147 (true)
23148 (false)))
23149 (clobber (scratch))])]. */
23151 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23152 cmove_rtx = gen_rtx_SET (dest,
23153 gen_rtx_IF_THEN_ELSE (result_mode,
23154 compare_rtx,
23155 true_cond,
23156 false_cond));
23158 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23159 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23160 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23162 return 1;
23165 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
23166 operands of the last comparison is nonzero/true, FALSE_COND if it
23167 is zero/false. Return 0 if the hardware has no such operation. */
23170 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23172 enum rtx_code code = GET_CODE (op);
23173 rtx op0 = XEXP (op, 0);
23174 rtx op1 = XEXP (op, 1);
23175 machine_mode compare_mode = GET_MODE (op0);
23176 machine_mode result_mode = GET_MODE (dest);
23177 rtx temp;
23178 bool is_against_zero;
23180 /* These modes should always match. */
23181 if (GET_MODE (op1) != compare_mode
23182 /* In the isel case however, we can use a compare immediate, so
23183 op1 may be a small constant. */
23184 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23185 return 0;
23186 if (GET_MODE (true_cond) != result_mode)
23187 return 0;
23188 if (GET_MODE (false_cond) != result_mode)
23189 return 0;
23191 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23192 if (TARGET_P9_MINMAX
23193 && (compare_mode == SFmode || compare_mode == DFmode)
23194 && (result_mode == SFmode || result_mode == DFmode))
23196 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23197 return 1;
23199 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23200 return 1;
23203 /* Don't allow using floating point comparisons for integer results for
23204 now. */
23205 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23206 return 0;
23208 /* First, work out if the hardware can do this at all, or
23209 if it's too slow.... */
23210 if (!FLOAT_MODE_P (compare_mode))
23212 if (TARGET_ISEL)
23213 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23214 return 0;
23217 is_against_zero = op1 == CONST0_RTX (compare_mode);
23219 /* A floating-point subtract might overflow, underflow, or produce
23220 an inexact result, thus changing the floating-point flags, so it
23221 can't be generated if we care about that. It's safe if one side
23222 of the construct is zero, since then no subtract will be
23223 generated. */
23224 if (SCALAR_FLOAT_MODE_P (compare_mode)
23225 && flag_trapping_math && ! is_against_zero)
23226 return 0;
23228 /* Eliminate half of the comparisons by switching operands, this
23229 makes the remaining code simpler. */
23230 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23231 || code == LTGT || code == LT || code == UNLE)
23233 code = reverse_condition_maybe_unordered (code);
23234 temp = true_cond;
23235 true_cond = false_cond;
23236 false_cond = temp;
23239 /* UNEQ and LTGT take four instructions for a comparison with zero;
23240 it'll probably be faster to use a branch here too. */
23241 if (code == UNEQ && HONOR_NANS (compare_mode))
23242 return 0;
23244 /* We're going to try to implement comparisons by performing
23245 a subtract, then comparing against zero. Unfortunately,
23246 Inf - Inf is NaN which is not zero, and so if we don't
23247 know that the operand is finite and the comparison
23248 would treat EQ different to UNORDERED, we can't do it. */
23249 if (HONOR_INFINITIES (compare_mode)
23250 && code != GT && code != UNGE
23251 && (GET_CODE (op1) != CONST_DOUBLE
23252 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23253 /* Constructs of the form (a OP b ? a : b) are safe. */
23254 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23255 || (! rtx_equal_p (op0, true_cond)
23256 && ! rtx_equal_p (op1, true_cond))))
23257 return 0;
23259 /* At this point we know we can use fsel. */
23261 /* Reduce the comparison to a comparison against zero. */
23262 if (! is_against_zero)
23264 temp = gen_reg_rtx (compare_mode);
23265 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23266 op0 = temp;
23267 op1 = CONST0_RTX (compare_mode);
23270 /* If we don't care about NaNs we can reduce some of the comparisons
23271 down to faster ones. */
23272 if (! HONOR_NANS (compare_mode))
23273 switch (code)
23275 case GT:
23276 code = LE;
23277 temp = true_cond;
23278 true_cond = false_cond;
23279 false_cond = temp;
23280 break;
23281 case UNGE:
23282 code = GE;
23283 break;
23284 case UNEQ:
23285 code = EQ;
23286 break;
23287 default:
23288 break;
23291 /* Now, reduce everything down to a GE. */
23292 switch (code)
23294 case GE:
23295 break;
23297 case LE:
23298 temp = gen_reg_rtx (compare_mode);
23299 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23300 op0 = temp;
23301 break;
23303 case ORDERED:
23304 temp = gen_reg_rtx (compare_mode);
23305 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23306 op0 = temp;
23307 break;
23309 case EQ:
23310 temp = gen_reg_rtx (compare_mode);
23311 emit_insn (gen_rtx_SET (temp,
23312 gen_rtx_NEG (compare_mode,
23313 gen_rtx_ABS (compare_mode, op0))));
23314 op0 = temp;
23315 break;
23317 case UNGE:
23318 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23319 temp = gen_reg_rtx (result_mode);
23320 emit_insn (gen_rtx_SET (temp,
23321 gen_rtx_IF_THEN_ELSE (result_mode,
23322 gen_rtx_GE (VOIDmode,
23323 op0, op1),
23324 true_cond, false_cond)));
23325 false_cond = true_cond;
23326 true_cond = temp;
23328 temp = gen_reg_rtx (compare_mode);
23329 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23330 op0 = temp;
23331 break;
23333 case GT:
23334 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23335 temp = gen_reg_rtx (result_mode);
23336 emit_insn (gen_rtx_SET (temp,
23337 gen_rtx_IF_THEN_ELSE (result_mode,
23338 gen_rtx_GE (VOIDmode,
23339 op0, op1),
23340 true_cond, false_cond)));
23341 true_cond = false_cond;
23342 false_cond = temp;
23344 temp = gen_reg_rtx (compare_mode);
23345 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23346 op0 = temp;
23347 break;
23349 default:
23350 gcc_unreachable ();
23353 emit_insn (gen_rtx_SET (dest,
23354 gen_rtx_IF_THEN_ELSE (result_mode,
23355 gen_rtx_GE (VOIDmode,
23356 op0, op1),
23357 true_cond, false_cond)));
23358 return 1;
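/* Sketch of the fsel lowering above: everything funnels into a
   GE-against-zero select.  E.g. "a <= 0 ? x : y" negates a first,
   and "a == 0" becomes "-fabs(a) >= 0"; the final if_then_else on GE
   is what the fsel pattern matches.  */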
23361 /* Same as above, but for ints (isel). */
23364 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23366 rtx condition_rtx, cr;
23367 machine_mode mode = GET_MODE (dest);
23368 enum rtx_code cond_code;
23369 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23370 bool signedp;
23372 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23373 return 0;
23375 /* We still have to do the compare, because isel doesn't do a
23376 compare, it just looks at the CRx bits set by a previous compare
23377 instruction. */
23378 condition_rtx = rs6000_generate_compare (op, mode);
23379 cond_code = GET_CODE (condition_rtx);
23380 cr = XEXP (condition_rtx, 0);
23381 signedp = GET_MODE (cr) == CCmode;
23383 isel_func = (mode == SImode
23384 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23385 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23387 switch (cond_code)
23389 case LT: case GT: case LTU: case GTU: case EQ:
23390 /* isel handles these directly. */
23391 break;
23393 default:
23394 /* We need to swap the sense of the comparison. */
23396 std::swap (false_cond, true_cond);
23397 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23399 break;
23402 false_cond = force_reg (mode, false_cond);
23403 if (true_cond != const0_rtx)
23404 true_cond = force_reg (mode, true_cond);
23406 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23408 return 1;
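/* For example, for SImode with a signed comparison, the expansion
   above yields roughly
       cmpw  cr0,ra,rb      # rs6000_generate_compare
       isel  rt,rc,rd,lt    # rt = cr0.lt ? rc : rd
   for x = (a < b) ? c : d (an illustrative sketch; the exact CR
   field and operand order come from the isel patterns).  */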
23411 void
23412 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23414 machine_mode mode = GET_MODE (op0);
23415 enum rtx_code c;
23416 rtx target;
23418 /* VSX/altivec have direct min/max insns. */
23419 if ((code == SMAX || code == SMIN)
23420 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23421 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23423 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23424 return;
23427 if (code == SMAX || code == SMIN)
23428 c = GE;
23429 else
23430 c = GEU;
23432 if (code == SMAX || code == UMAX)
23433 target = emit_conditional_move (dest, c, op0, op1, mode,
23434 op0, op1, mode, 0);
23435 else
23436 target = emit_conditional_move (dest, c, op0, op1, mode,
23437 op1, op0, mode, 0);
23438 gcc_assert (target);
23439 if (target != dest)
23440 emit_move_insn (dest, target);
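/* For example, SMAX is expanded as dest = (op0 >= op1) ? op0 : op1,
   so when no direct min/max instruction exists the conditional move
   lands in the fsel or isel paths above.  */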
23443 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23444 COND is true. Mark the jump as unlikely to be taken. */
23446 static void
23447 emit_unlikely_jump (rtx cond, rtx label)
23449 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23450 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23451 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
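/* The probability note lets the branch output logic pick the
   "not taken" static hint (e.g. "bne-" rather than "bne+") for the
   retry branches emitted by the atomic splitters below.  */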
23454 /* A subroutine of the atomic operation splitters. Emit a load-locked
23455 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23456 the zero_extend operation. */
23458 static void
23459 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23461 rtx (*fn) (rtx, rtx) = NULL;
23463 switch (mode)
23465 case E_QImode:
23466 fn = gen_load_lockedqi;
23467 break;
23468 case E_HImode:
23469 fn = gen_load_lockedhi;
23470 break;
23471 case E_SImode:
23472 if (GET_MODE (mem) == QImode)
23473 fn = gen_load_lockedqi_si;
23474 else if (GET_MODE (mem) == HImode)
23475 fn = gen_load_lockedhi_si;
23476 else
23477 fn = gen_load_lockedsi;
23478 break;
23479 case E_DImode:
23480 fn = gen_load_lockeddi;
23481 break;
23482 case E_TImode:
23483 fn = gen_load_lockedti;
23484 break;
23485 default:
23486 gcc_unreachable ();
23488 emit_insn (fn (reg, mem));
23491 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23492 instruction in MODE. */
23494 static void
23495 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23497 rtx (*fn) (rtx, rtx, rtx) = NULL;
23499 switch (mode)
23501 case E_QImode:
23502 fn = gen_store_conditionalqi;
23503 break;
23504 case E_HImode:
23505 fn = gen_store_conditionalhi;
23506 break;
23507 case E_SImode:
23508 fn = gen_store_conditionalsi;
23509 break;
23510 case E_DImode:
23511 fn = gen_store_conditionaldi;
23512 break;
23513 case E_TImode:
23514 fn = gen_store_conditionalti;
23515 break;
23516 default:
23517 gcc_unreachable ();
23520 /* Emit sync before stwcx. to address PPC405 Erratum. */
23521 if (PPC405_ERRATUM77)
23522 emit_insn (gen_hwsync ());
23524 emit_insn (fn (res, mem, val));
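/* Together, emit_load_locked and emit_store_conditional form the
   usual reservation loop; for a word-sized update the splitters
   below emit roughly
     1: lwarx  r9,0,r3      # load and reserve
        <compute new value in r10>
        stwcx. r10,0,r3     # store iff reservation still held
        bne-   1b           # lost the reservation, retry
   (an illustrative sketch).  */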
23527 /* Expand barriers before and after a load_locked/store_cond sequence. */
23529 static rtx
23530 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23532 rtx addr = XEXP (mem, 0);
23534 if (!legitimate_indirect_address_p (addr, reload_completed)
23535 && !legitimate_indexed_address_p (addr, reload_completed))
23537 addr = force_reg (Pmode, addr);
23538 mem = replace_equiv_address_nv (mem, addr);
23541 switch (model)
23543 case MEMMODEL_RELAXED:
23544 case MEMMODEL_CONSUME:
23545 case MEMMODEL_ACQUIRE:
23546 break;
23547 case MEMMODEL_RELEASE:
23548 case MEMMODEL_ACQ_REL:
23549 emit_insn (gen_lwsync ());
23550 break;
23551 case MEMMODEL_SEQ_CST:
23552 emit_insn (gen_hwsync ());
23553 break;
23554 default:
23555 gcc_unreachable ();
23557 return mem;
23560 static void
23561 rs6000_post_atomic_barrier (enum memmodel model)
23563 switch (model)
23565 case MEMMODEL_RELAXED:
23566 case MEMMODEL_CONSUME:
23567 case MEMMODEL_RELEASE:
23568 break;
23569 case MEMMODEL_ACQUIRE:
23570 case MEMMODEL_ACQ_REL:
23571 case MEMMODEL_SEQ_CST:
23572 emit_insn (gen_isync ());
23573 break;
23574 default:
23575 gcc_unreachable ();
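/* Taken together, the two barrier routines give the usual PowerPC
   mapping for the C11 memory models: seq_cst brackets the access
   with hwsync ... isync, acquire uses only the trailing isync, and
   release only the leading lwsync.  */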
23579 /* A subroutine of the various atomic expanders. For sub-word operations,
23580 we must adjust things to operate on SImode. Given the original MEM,
23581 return a new aligned memory. Also build and return the quantities by
23582 which to shift and mask. */
23584 static rtx
23585 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23587 rtx addr, align, shift, mask, mem;
23588 HOST_WIDE_INT shift_mask;
23589 machine_mode mode = GET_MODE (orig_mem);
23591 /* For smaller modes, we have to implement this via SImode. */
23592 shift_mask = (mode == QImode ? 0x18 : 0x10);
23594 addr = XEXP (orig_mem, 0);
23595 addr = force_reg (GET_MODE (addr), addr);
23597 /* Aligned memory containing subword. Generate a new memory. We
23598 do not want any of the existing MEM_ATTR data, as we're now
23599 accessing memory outside the original object. */
23600 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23601 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23602 mem = gen_rtx_MEM (SImode, align);
23603 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23604 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23605 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23607 /* Shift amount for subword relative to aligned word. */
23608 shift = gen_reg_rtx (SImode);
23609 addr = gen_lowpart (SImode, addr);
23610 rtx tmp = gen_reg_rtx (SImode);
23611 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23612 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23613 if (BYTES_BIG_ENDIAN)
23614 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23615 shift, 1, OPTAB_LIB_WIDEN);
23616 *pshift = shift;
23618 /* Mask for insertion. */
23619 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23620 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23621 *pmask = mask;
23623 return mem;
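/* Worked example (QImode, little-endian): for a byte at address
   0x1003 the aligned word is at 0x1000, the shift is
   (0x1003 << 3) & 0x18 = 24, and the insertion mask is
   0xFF << 24 = 0xFF000000.  On big-endian the shift is XORed with
   0x18, giving 0, since that byte is then the word's least
   significant one.  */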
23626 /* A subroutine of the various atomic expanders. For sub-word operands,
23627 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23629 static rtx
23630 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23632 rtx x;
23634 x = gen_reg_rtx (SImode);
23635 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23636 gen_rtx_NOT (SImode, mask),
23637 oldval)));
23639 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23641 return x;
23644 /* A subroutine of the various atomic expanders. For sub-word operands,
23645 extract WIDE to NARROW via SHIFT. */
23647 static void
23648 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23650 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23651 wide, 1, OPTAB_LIB_WIDEN);
23652 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23655 /* Expand an atomic compare and swap operation. */
23657 void
23658 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23660 rtx boolval, retval, mem, oldval, newval, cond;
23661 rtx label1, label2, x, mask, shift;
23662 machine_mode mode, orig_mode;
23663 enum memmodel mod_s, mod_f;
23664 bool is_weak;
23666 boolval = operands[0];
23667 retval = operands[1];
23668 mem = operands[2];
23669 oldval = operands[3];
23670 newval = operands[4];
23671 is_weak = (INTVAL (operands[5]) != 0);
23672 mod_s = memmodel_base (INTVAL (operands[6]));
23673 mod_f = memmodel_base (INTVAL (operands[7]));
23674 orig_mode = mode = GET_MODE (mem);
23676 mask = shift = NULL_RTX;
23677 if (mode == QImode || mode == HImode)
23679 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23680 lwarx and shift/mask operations. With power8, we need to do the
23681 comparison in SImode, but the store is still done in QI/HImode. */
23682 oldval = convert_modes (SImode, mode, oldval, 1);
23684 if (!TARGET_SYNC_HI_QI)
23686 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23689 /* Shift and mask OLDVAL into position within the word. */
23689 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23690 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23692 /* Shift and mask NEWVAL into position within the word. */
23693 newval = convert_modes (SImode, mode, newval, 1);
23694 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23695 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23698 /* Prepare to adjust the return value. */
23699 retval = gen_reg_rtx (SImode);
23700 mode = SImode;
23702 else if (reg_overlap_mentioned_p (retval, oldval))
23703 oldval = copy_to_reg (oldval);
23705 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23706 oldval = copy_to_mode_reg (mode, oldval);
23708 if (reg_overlap_mentioned_p (retval, newval))
23709 newval = copy_to_reg (newval);
23711 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23713 label1 = NULL_RTX;
23714 if (!is_weak)
23716 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23717 emit_label (XEXP (label1, 0));
23719 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23721 emit_load_locked (mode, retval, mem);
23723 x = retval;
23724 if (mask)
23725 x = expand_simple_binop (SImode, AND, retval, mask,
23726 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23728 cond = gen_reg_rtx (CCmode);
23729 /* If we have TImode, synthesize a comparison. */
23730 if (mode != TImode)
23731 x = gen_rtx_COMPARE (CCmode, x, oldval);
23732 else
23734 rtx xor1_result = gen_reg_rtx (DImode);
23735 rtx xor2_result = gen_reg_rtx (DImode);
23736 rtx or_result = gen_reg_rtx (DImode);
23737 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23738 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23739 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23740 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23742 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23743 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23744 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23745 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23748 emit_insn (gen_rtx_SET (cond, x));
23750 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23751 emit_unlikely_jump (x, label2);
23753 x = newval;
23754 if (mask)
23755 x = rs6000_mask_atomic_subword (retval, newval, mask);
23757 emit_store_conditional (orig_mode, cond, mem, x);
23759 if (!is_weak)
23761 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23762 emit_unlikely_jump (x, label1);
23765 if (!is_mm_relaxed (mod_f))
23766 emit_label (XEXP (label2, 0));
23768 rs6000_post_atomic_barrier (mod_s);
23770 if (is_mm_relaxed (mod_f))
23771 emit_label (XEXP (label2, 0));
23773 if (shift)
23774 rs6000_finish_atomic_subword (operands[1], retval, shift);
23775 else if (mode != GET_MODE (operands[1]))
23776 convert_move (operands[1], retval, 1);
23778 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23779 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23780 emit_insn (gen_rtx_SET (boolval, x));
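/* For reference, a strong seq_cst compare-and-swap on a word
   expands to roughly
       hwsync
    1: lwarx  r9,0,r3
       cmpw   cr0,r9,r4
       bne-   2f
       stwcx. r5,0,r3
       bne-   1b
    2: isync
   leaving CR0 EQ set exactly on success (an illustrative sketch).  */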
23783 /* Expand an atomic exchange operation. */
23785 void
23786 rs6000_expand_atomic_exchange (rtx operands[])
23788 rtx retval, mem, val, cond;
23789 machine_mode mode;
23790 enum memmodel model;
23791 rtx label, x, mask, shift;
23793 retval = operands[0];
23794 mem = operands[1];
23795 val = operands[2];
23796 model = memmodel_base (INTVAL (operands[3]));
23797 mode = GET_MODE (mem);
23799 mask = shift = NULL_RTX;
23800 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23802 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23804 /* Shift and mask VAL into position within the word. */
23805 val = convert_modes (SImode, mode, val, 1);
23806 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23807 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23809 /* Prepare to adjust the return value. */
23810 retval = gen_reg_rtx (SImode);
23811 mode = SImode;
23814 mem = rs6000_pre_atomic_barrier (mem, model);
23816 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23817 emit_label (XEXP (label, 0));
23819 emit_load_locked (mode, retval, mem);
23821 x = val;
23822 if (mask)
23823 x = rs6000_mask_atomic_subword (retval, val, mask);
23825 cond = gen_reg_rtx (CCmode);
23826 emit_store_conditional (mode, cond, mem, x);
23828 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23829 emit_unlikely_jump (x, label);
23831 rs6000_post_atomic_barrier (model);
23833 if (shift)
23834 rs6000_finish_atomic_subword (operands[0], retval, shift);
23837 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23838 to perform. MEM is the memory on which to operate. VAL is the second
23839 operand of the binary operator. BEFORE and AFTER are optional locations to
23840 return the value of MEM either before or after the operation. MODEL_RTX
23841 is a CONST_INT containing the memory model to use. */
23843 void
23844 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23845 rtx orig_before, rtx orig_after, rtx model_rtx)
23847 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23848 machine_mode mode = GET_MODE (mem);
23849 machine_mode store_mode = mode;
23850 rtx label, x, cond, mask, shift;
23851 rtx before = orig_before, after = orig_after;
23853 mask = shift = NULL_RTX;
23854 /* On power8, we want to use SImode for the operation. On previous systems,
23855 do the operation on the containing word and shift/mask to get the proper byte or
23856 halfword. */
23857 if (mode == QImode || mode == HImode)
23859 if (TARGET_SYNC_HI_QI)
23861 val = convert_modes (SImode, mode, val, 1);
23863 /* Prepare to adjust the return value. */
23864 before = gen_reg_rtx (SImode);
23865 if (after)
23866 after = gen_reg_rtx (SImode);
23867 mode = SImode;
23869 else
23871 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23873 /* Shift and mask VAL into position within the word. */
23874 val = convert_modes (SImode, mode, val, 1);
23875 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23876 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23878 switch (code)
23880 case IOR:
23881 case XOR:
23882 /* We've already zero-extended VAL. That is sufficient to
23883 make certain that it does not affect other bits. */
23884 mask = NULL;
23885 break;
23887 case AND:
23888 /* If we make certain that all of the other bits in VAL are
23889 set, that will be sufficient to not affect other bits. */
23890 x = gen_rtx_NOT (SImode, mask);
23891 x = gen_rtx_IOR (SImode, x, val);
23892 emit_insn (gen_rtx_SET (val, x));
23893 mask = NULL;
23894 break;
23896 case NOT:
23897 case PLUS:
23898 case MINUS:
23899 /* These will all affect bits outside the field and need
23900 adjustment via MASK within the loop. */
23901 break;
23903 default:
23904 gcc_unreachable ();
23907 /* Prepare to adjust the return value. */
23908 before = gen_reg_rtx (SImode);
23909 if (after)
23910 after = gen_reg_rtx (SImode);
23911 store_mode = mode = SImode;
23915 mem = rs6000_pre_atomic_barrier (mem, model);
23917 label = gen_label_rtx ();
23918 emit_label (label);
23919 label = gen_rtx_LABEL_REF (VOIDmode, label);
23921 if (before == NULL_RTX)
23922 before = gen_reg_rtx (mode);
23924 emit_load_locked (mode, before, mem);
23926 if (code == NOT)
23928 x = expand_simple_binop (mode, AND, before, val,
23929 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23930 after = expand_simple_unop (mode, NOT, x, after, 1);
23932 else
23934 after = expand_simple_binop (mode, code, before, val,
23935 after, 1, OPTAB_LIB_WIDEN);
23938 x = after;
23939 if (mask)
23941 x = expand_simple_binop (SImode, AND, after, mask,
23942 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23943 x = rs6000_mask_atomic_subword (before, x, mask);
23945 else if (store_mode != mode)
23946 x = convert_modes (store_mode, mode, x, 1);
23948 cond = gen_reg_rtx (CCmode);
23949 emit_store_conditional (store_mode, cond, mem, x);
23951 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23952 emit_unlikely_jump (x, label);
23954 rs6000_post_atomic_barrier (model);
23956 if (shift)
23958 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23959 then do the calculations in a SImode register. */
23960 if (orig_before)
23961 rs6000_finish_atomic_subword (orig_before, before, shift);
23962 if (orig_after)
23963 rs6000_finish_atomic_subword (orig_after, after, shift);
23965 else if (store_mode != mode)
23967 /* QImode/HImode on machines with lbarx/lharx where we do the native
23968 operation and then do the calculations in a SImode register. */
23969 if (orig_before)
23970 convert_move (orig_before, before, 1);
23971 if (orig_after)
23972 convert_move (orig_after, after, 1);
23974 else if (orig_after && after != orig_after)
23975 emit_move_insn (orig_after, after);
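/* E.g. a fetch-and-add on a halfword without lharx runs the loop on
   the containing word: VAL is pre-shifted into the halfword's lane,
   the sum is merged into the unrelated bytes via
   rs6000_mask_atomic_subword, and the BEFORE/AFTER results are
   shifted back down by rs6000_finish_atomic_subword.  */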
23978 /* Emit instructions to move SRC to DST. Called by splitters for
23979 multi-register moves. It will emit at most one instruction for
23980 each register that is accessed; that is, it won't emit li/lis pairs
23981 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23982 register. */
23984 void
23985 rs6000_split_multireg_move (rtx dst, rtx src)
23987 /* The register number of the first register being moved. */
23988 int reg;
23989 /* The mode that is to be moved. */
23990 machine_mode mode;
23991 /* The mode that the move is being done in, and its size. */
23992 machine_mode reg_mode;
23993 int reg_mode_size;
23994 /* The number of registers that will be moved. */
23995 int nregs;
23997 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23998 mode = GET_MODE (dst);
23999 nregs = hard_regno_nregs (reg, mode);
24000 if (FP_REGNO_P (reg))
24001 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
24002 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
24003 else if (ALTIVEC_REGNO_P (reg))
24004 reg_mode = V16QImode;
24005 else
24006 reg_mode = word_mode;
24007 reg_mode_size = GET_MODE_SIZE (reg_mode);
24009 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
24011 /* TDmode residing in FP registers is special, since the ISA requires that
24012 the lower-numbered word of a register pair is always the most significant
24013 word, even in little-endian mode. This does not match the usual subreg
24014 semantics, so we cannot use simplify_gen_subreg in those cases. Access
24015 the appropriate constituent registers "by hand" in little-endian mode.
24017 Note we do not need to check for destructive overlap here since TDmode
24018 can only reside in even/odd register pairs. */
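/* For example, a TDmode value in the pair f10:f11 keeps its most
   significant word in f10 even on little-endian, so a move to
   f12:f13 copies f11->f13 and then f10->f12 directly instead of
   going through the (word-swapping) subreg machinery.  */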
24019 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
24021 rtx p_src, p_dst;
24022 int i;
24024 for (i = 0; i < nregs; i++)
24026 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
24027 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
24028 else
24029 p_src = simplify_gen_subreg (reg_mode, src, mode,
24030 i * reg_mode_size);
24032 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
24033 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
24034 else
24035 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
24036 i * reg_mode_size);
24038 emit_insn (gen_rtx_SET (p_dst, p_src));
24041 return;
24044 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
24046 /* Move register range backwards, if we might have destructive
24047 overlap. */
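/* E.g. splitting a two-word move from r3:r4 into r4:r5 must copy
   r4->r5 before r3->r4, hence the descending loop.  */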
24048 int i;
24049 for (i = nregs - 1; i >= 0; i--)
24050 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24051 i * reg_mode_size),
24052 simplify_gen_subreg (reg_mode, src, mode,
24053 i * reg_mode_size)));
24055 else
24057 int i;
24058 int j = -1;
24059 bool used_update = false;
24060 rtx restore_basereg = NULL_RTX;
24062 if (MEM_P (src) && INT_REGNO_P (reg))
24064 rtx breg;
24066 if (GET_CODE (XEXP (src, 0)) == PRE_INC
24067 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
24069 rtx delta_rtx;
24070 breg = XEXP (XEXP (src, 0), 0);
24071 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
24072 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
24073 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
24074 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24075 src = replace_equiv_address (src, breg);
24077 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
24079 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
24081 rtx basereg = XEXP (XEXP (src, 0), 0);
24082 if (TARGET_UPDATE)
24084 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
24085 emit_insn (gen_rtx_SET (ndst,
24086 gen_rtx_MEM (reg_mode,
24087 XEXP (src, 0))));
24088 used_update = true;
24090 else
24091 emit_insn (gen_rtx_SET (basereg,
24092 XEXP (XEXP (src, 0), 1)));
24093 src = replace_equiv_address (src, basereg);
24095 else
24097 rtx basereg = gen_rtx_REG (Pmode, reg);
24098 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
24099 src = replace_equiv_address (src, basereg);
24103 breg = XEXP (src, 0);
24104 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
24105 breg = XEXP (breg, 0);
24107 /* If the base register we are using to address memory is
24108 also a destination reg, then change that register last. */
24109 if (REG_P (breg)
24110 && REGNO (breg) >= REGNO (dst)
24111 && REGNO (breg) < REGNO (dst) + nregs)
24112 j = REGNO (breg) - REGNO (dst);
24114 else if (MEM_P (dst) && INT_REGNO_P (reg))
24116 rtx breg;
24118 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24119 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24121 rtx delta_rtx;
24122 breg = XEXP (XEXP (dst, 0), 0);
24123 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24124 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24125 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24127 /* We have to update the breg before doing the store.
24128 Use store with update, if available. */
24130 if (TARGET_UPDATE)
24132 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24133 emit_insn (TARGET_32BIT
24134 ? (TARGET_POWERPC64
24135 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24136 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24137 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24138 used_update = true;
24140 else
24141 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24142 dst = replace_equiv_address (dst, breg);
24144 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
24145 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24147 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24149 rtx basereg = XEXP (XEXP (dst, 0), 0);
24150 if (TARGET_UPDATE)
24152 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24153 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24154 XEXP (dst, 0)),
24155 nsrc));
24156 used_update = true;
24158 else
24159 emit_insn (gen_rtx_SET (basereg,
24160 XEXP (XEXP (dst, 0), 1)));
24161 dst = replace_equiv_address (dst, basereg);
24163 else
24165 rtx basereg = XEXP (XEXP (dst, 0), 0);
24166 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24167 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24168 && REG_P (basereg)
24169 && REG_P (offsetreg)
24170 && REGNO (basereg) != REGNO (offsetreg));
24171 if (REGNO (basereg) == 0)
24173 rtx tmp = offsetreg;
24174 offsetreg = basereg;
24175 basereg = tmp;
24177 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24178 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24179 dst = replace_equiv_address (dst, basereg);
24182 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24183 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
24186 for (i = 0; i < nregs; i++)
24188 /* Calculate index to next subword. */
24189 ++j;
24190 if (j == nregs)
24191 j = 0;
24193 /* If compiler already emitted move of first word by
24194 store with update, no need to do anything. */
24195 if (j == 0 && used_update)
24196 continue;
24198 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24199 j * reg_mode_size),
24200 simplify_gen_subreg (reg_mode, src, mode,
24201 j * reg_mode_size)));
24203 if (restore_basereg != NULL_RTX)
24204 emit_insn (restore_basereg);
24209 /* This page contains routines that are used to determine what the
24210 function prologue and epilogue code will do and write them out. */
24212 /* Determine whether the REG is really used. */
24214 static bool
24215 save_reg_p (int reg)
24217 /* We need to mark the PIC offset register live for the same conditions
24218 as it is set up, or otherwise it won't be saved before we clobber it. */
24220 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24222 /* When calling eh_return, we must return true for all the cases
24223 where conditional_register_usage marks the PIC offset reg
24224 call used. */
24225 if (TARGET_TOC && TARGET_MINIMAL_TOC
24226 && (crtl->calls_eh_return
24227 || df_regs_ever_live_p (reg)
24228 || !constant_pool_empty_p ()))
24229 return true;
24231 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24232 && flag_pic)
24233 return true;
24236 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24239 /* Return the first fixed-point register that is required to be
24240 saved. 32 if none. */
24242 static int
24243 first_reg_to_save (void)
24245 int first_reg;
24247 /* Find lowest numbered live register. */
24248 for (first_reg = 13; first_reg <= 31; first_reg++)
24249 if (save_reg_p (first_reg))
24250 break;
24252 #if TARGET_MACHO
24253 if (flag_pic
24254 && crtl->uses_pic_offset_table
24255 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24256 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24257 #endif
24259 return first_reg;
24262 /* Similar, for FP regs. */
24264 static int
24265 first_fp_reg_to_save (void)
24267 int first_reg;
24269 /* Find lowest numbered live register. */
24270 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24271 if (save_reg_p (first_reg))
24272 break;
24274 return first_reg;
24277 /* Similar, for AltiVec regs. */
24279 static int
24280 first_altivec_reg_to_save (void)
24282 int i;
24284 /* Stack frame remains as is unless we are in AltiVec ABI. */
24285 if (! TARGET_ALTIVEC_ABI)
24286 return LAST_ALTIVEC_REGNO + 1;
24288 /* On Darwin, the unwind routines are compiled without
24289 TARGET_ALTIVEC, and use save_world to save/restore the
24290 altivec registers when necessary. */
24291 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24292 && ! TARGET_ALTIVEC)
24293 return FIRST_ALTIVEC_REGNO + 20;
24295 /* Find lowest numbered live register. */
24296 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24297 if (save_reg_p (i))
24298 break;
24300 return i;
24303 /* Return a 32-bit mask of the AltiVec registers we need to set in
24304 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24305 the 32-bit word is 0. */
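/* E.g. if only V20 is live, the mask is 0x80000000 >> 20 = 0x800,
   assuming the usual ALTIVEC_REG_BIT definition.  */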
24307 static unsigned int
24308 compute_vrsave_mask (void)
24310 unsigned int i, mask = 0;
24312 /* On Darwin, the unwind routines are compiled without
24313 TARGET_ALTIVEC, and use save_world to save/restore the
24314 call-saved altivec registers when necessary. */
24315 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24316 && ! TARGET_ALTIVEC)
24317 mask |= 0xFFF;
24319 /* First, find out if we use _any_ altivec registers. */
24320 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24321 if (df_regs_ever_live_p (i))
24322 mask |= ALTIVEC_REG_BIT (i);
24324 if (mask == 0)
24325 return mask;
24327 /* Next, remove the argument registers from the set. These must
24328 be in the VRSAVE mask set by the caller, so we don't need to add
24329 them in again. More importantly, the mask we compute here is
24330 used to generate CLOBBERs in the set_vrsave insn, and we do not
24331 wish the argument registers to die. */
24332 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24333 mask &= ~ALTIVEC_REG_BIT (i);
24335 /* Similarly, remove the return value from the set. */
24337 bool yes = false;
24338 diddle_return_value (is_altivec_return_reg, &yes);
24339 if (yes)
24340 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24343 return mask;
24346 /* For a very restricted set of circumstances, we can cut down the
24347 size of prologues/epilogues by calling our own save/restore-the-world
24348 routines. */
24350 static void
24351 compute_save_world_info (rs6000_stack_t *info)
24353 info->world_save_p = 1;
24354 info->world_save_p
24355 = (WORLD_SAVE_P (info)
24356 && DEFAULT_ABI == ABI_DARWIN
24357 && !cfun->has_nonlocal_label
24358 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24359 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24360 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24361 && info->cr_save_p);
24363 /* This will not work in conjunction with sibcalls. Make sure there
24364 are none. (This check is expensive, but seldom executed.) */
24365 if (WORLD_SAVE_P (info))
24367 rtx_insn *insn;
24368 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24369 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24371 info->world_save_p = 0;
24372 break;
24376 if (WORLD_SAVE_P (info))
24378 /* Even if we're not touching VRsave, make sure there's room on the
24379 stack for it, if it looks like we're calling SAVE_WORLD, which
24380 will attempt to save it. */
24381 info->vrsave_size = 4;
24383 /* If we are going to save the world, we need to save the link register too. */
24384 info->lr_save_p = 1;
24386 /* "Save" the VRsave register too if we're saving the world. */
24387 if (info->vrsave_mask == 0)
24388 info->vrsave_mask = compute_vrsave_mask ();
24390 /* Because the Darwin register save/restore routines only handle
24391 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24392 check. */
24393 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24394 && (info->first_altivec_reg_save
24395 >= FIRST_SAVED_ALTIVEC_REGNO));
24398 return;
24402 static void
24403 is_altivec_return_reg (rtx reg, void *xyes)
24405 bool *yes = (bool *) xyes;
24406 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24407 *yes = true;
24411 /* Return whether REG is a global user reg or has been specifed by
24412 -ffixed-REG. We should not restore these, and so cannot use
24413 lmw or out-of-line restore functions if there are any. We also
24414 can't save them (well, emit frame notes for them), because frame
24415 unwinding during exception handling will restore saved registers. */
24417 static bool
24418 fixed_reg_p (int reg)
24420 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24421 backend sets it, overriding anything the user might have given. */
24422 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24423 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24424 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24425 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24426 return false;
24428 return fixed_regs[reg];
24431 /* Determine the strategy for savings/restoring registers. */
24433 enum {
24434 SAVE_MULTIPLE = 0x1,
24435 SAVE_INLINE_GPRS = 0x2,
24436 SAVE_INLINE_FPRS = 0x4,
24437 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24438 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24439 SAVE_INLINE_VRS = 0x20,
24440 REST_MULTIPLE = 0x100,
24441 REST_INLINE_GPRS = 0x200,
24442 REST_INLINE_FPRS = 0x400,
24443 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24444 REST_INLINE_VRS = 0x1000
24447 static int
24448 rs6000_savres_strategy (rs6000_stack_t *info,
24449 bool using_static_chain_p)
24451 int strategy = 0;
24453 /* Select between in-line and out-of-line save and restore of regs.
24454 First, all the obvious cases where we don't use out-of-line. */
24455 if (crtl->calls_eh_return
24456 || cfun->machine->ra_need_lr)
24457 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24458 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24459 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24461 if (info->first_gp_reg_save == 32)
24462 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24464 if (info->first_fp_reg_save == 64
24465 /* The out-of-line FP routines use double-precision stores;
24466 we can't use those routines if we don't have such stores. */
24467 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24468 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24470 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24471 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24473 /* Define cutoff for using out-of-line functions to save registers. */
24474 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24476 if (!optimize_size)
24478 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24479 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24480 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24482 else
24484 /* Prefer out-of-line restore if it will exit. */
24485 if (info->first_fp_reg_save > 61)
24486 strategy |= SAVE_INLINE_FPRS;
24487 if (info->first_gp_reg_save > 29)
24489 if (info->first_fp_reg_save == 64)
24490 strategy |= SAVE_INLINE_GPRS;
24491 else
24492 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24494 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24495 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24498 else if (DEFAULT_ABI == ABI_DARWIN)
24500 if (info->first_fp_reg_save > 60)
24501 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24502 if (info->first_gp_reg_save > 29)
24503 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24504 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24506 else
24508 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24509 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24510 || info->first_fp_reg_save > 61)
24511 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24512 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24513 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24516 /* Don't bother to try to save things out-of-line if r11 is occupied
24517 by the static chain. It would require too much fiddling and the
24518 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24519 pointer on Darwin, and AIX uses r1 or r12. */
24520 if (using_static_chain_p
24521 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24522 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24523 | SAVE_INLINE_GPRS
24524 | SAVE_INLINE_VRS);
24526 /* Don't ever restore fixed regs. That means we can't use the
24527 out-of-line register restore functions if a fixed reg is in the
24528 range of regs restored. */
24529 if (!(strategy & REST_INLINE_FPRS))
24530 for (int i = info->first_fp_reg_save; i < 64; i++)
24531 if (fixed_regs[i])
24533 strategy |= REST_INLINE_FPRS;
24534 break;
24537 /* We can only use the out-of-line routines to restore fprs if we've
24538 saved all the registers from first_fp_reg_save in the prologue.
24539 Otherwise, we risk loading garbage. Of course, if we have saved
24540 out-of-line then we know we haven't skipped any fprs. */
24541 if ((strategy & SAVE_INLINE_FPRS)
24542 && !(strategy & REST_INLINE_FPRS))
24543 for (int i = info->first_fp_reg_save; i < 64; i++)
24544 if (!save_reg_p (i))
24546 strategy |= REST_INLINE_FPRS;
24547 break;
24550 /* Similarly, for altivec regs. */
24551 if (!(strategy & REST_INLINE_VRS))
24552 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24553 if (fixed_regs[i])
24555 strategy |= REST_INLINE_VRS;
24556 break;
24559 if ((strategy & SAVE_INLINE_VRS)
24560 && !(strategy & REST_INLINE_VRS))
24561 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24562 if (!save_reg_p (i))
24564 strategy |= REST_INLINE_VRS;
24565 break;
24568 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24569 saved is an out-of-line save or restore. Set up the value for
24570 the next test (excluding out-of-line gprs). */
24571 bool lr_save_p = (info->lr_save_p
24572 || !(strategy & SAVE_INLINE_FPRS)
24573 || !(strategy & SAVE_INLINE_VRS)
24574 || !(strategy & REST_INLINE_FPRS)
24575 || !(strategy & REST_INLINE_VRS));
24577 if (TARGET_MULTIPLE
24578 && !TARGET_POWERPC64
24579 && info->first_gp_reg_save < 31
24580 && !(flag_shrink_wrap
24581 && flag_shrink_wrap_separate
24582 && optimize_function_for_speed_p (cfun)))
24584 int count = 0;
24585 for (int i = info->first_gp_reg_save; i < 32; i++)
24586 if (save_reg_p (i))
24587 count++;
24589 if (count <= 1)
24590 /* Don't use store multiple if only one reg needs to be
24591 saved. This can occur for example when the ABI_V4 pic reg
24592 (r30) needs to be saved to make calls, but r31 is not
24593 used. */
24594 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24595 else
24597 /* Prefer store multiple for saves over out-of-line
24598 routines, since the store-multiple instruction will
24599 always be smaller. */
24600 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24602 /* The situation is more complicated with load multiple.
24603 We'd prefer to use the out-of-line routines for restores,
24604 since the "exit" out-of-line routines can handle the
24605 restore of LR and the frame teardown. However it doesn't
24606 make sense to use the out-of-line routine if that is the
24607 only reason we'd need to save LR, and we can't use the
24608 "exit" out-of-line gpr restore if we have saved some
24609 fprs. In those cases it is advantageous to use load
24610 multiple when available. */
24611 if (info->first_fp_reg_save != 64 || !lr_save_p)
24612 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24616 /* Using the "exit" out-of-line routine does not improve code size
24617 if using it would require lr to be saved and if only saving one
24618 or two gprs. */
24619 else if (!lr_save_p && info->first_gp_reg_save > 29)
24620 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24622 /* Don't ever restore fixed regs. */
24623 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24624 for (int i = info->first_gp_reg_save; i < 32; i++)
24625 if (fixed_reg_p (i))
24627 strategy |= REST_INLINE_GPRS;
24628 strategy &= ~REST_MULTIPLE;
24629 break;
24632 /* We can only use load multiple or the out-of-line routines to
24633 restore gprs if we've saved all the registers from
24634 first_gp_reg_save. Otherwise, we risk loading garbage.
24635 Of course, if we have saved out-of-line or used stmw then we know
24636 we haven't skipped any gprs. */
24637 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24638 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24639 for (int i = info->first_gp_reg_save; i < 32; i++)
24640 if (!save_reg_p (i))
24642 strategy |= REST_INLINE_GPRS;
24643 strategy &= ~REST_MULTIPLE;
24644 break;
24647 if (TARGET_ELF && TARGET_64BIT)
24649 if (!(strategy & SAVE_INLINE_FPRS))
24650 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24651 else if (!(strategy & SAVE_INLINE_GPRS)
24652 && info->first_fp_reg_save == 64)
24653 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24655 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24656 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24658 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24659 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24661 return strategy;
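/* A rough illustration: on 32-bit ELF at -Os, a function saving
   r28..r31 prefers a single stmw for the save (SAVE_MULTIPLE) and,
   when LR must be saved anyway and no FPRs are in the way, the
   "exit" out-of-line routine for the restore; when not optimizing
   for size, every *_INLINE_* flag is set above and everything stays
   inline.  */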
24664 /* Calculate the stack information for the current function. This is
24665 complicated by having two separate calling sequences, the AIX calling
24666 sequence and the V.4 calling sequence.
24668 AIX (and Darwin/Mac OS X) stack frames look like:
24669 32-bit 64-bit
24670 SP----> +---------------------------------------+
24671 | back chain to caller | 0 0
24672 +---------------------------------------+
24673 | saved CR | 4 8 (8-11)
24674 +---------------------------------------+
24675 | saved LR | 8 16
24676 +---------------------------------------+
24677 | reserved for compilers | 12 24
24678 +---------------------------------------+
24679 | reserved for binders | 16 32
24680 +---------------------------------------+
24681 | saved TOC pointer | 20 40
24682 +---------------------------------------+
24683 | Parameter save area (+padding*) (P) | 24 48
24684 +---------------------------------------+
24685 | Alloca space (A) | 24+P etc.
24686 +---------------------------------------+
24687 | Local variable space (L) | 24+P+A
24688 +---------------------------------------+
24689 | Float/int conversion temporary (X) | 24+P+A+L
24690 +---------------------------------------+
24691 | Save area for AltiVec registers (W) | 24+P+A+L+X
24692 +---------------------------------------+
24693 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24694 +---------------------------------------+
24695 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24696 +---------------------------------------+
24697 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24698 +---------------------------------------+
24699 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24700 +---------------------------------------+
24701 old SP->| back chain to caller's caller |
24702 +---------------------------------------+
24704 * If the alloca area is present, the parameter save area is
24705 padded so that the former starts 16-byte aligned.
24707 The required alignment for AIX configurations is two words (i.e., 8
24708 or 16 bytes).
24710 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24712 SP----> +---------------------------------------+
24713 | Back chain to caller | 0
24714 +---------------------------------------+
24715 | Save area for CR | 8
24716 +---------------------------------------+
24717 | Saved LR | 16
24718 +---------------------------------------+
24719 | Saved TOC pointer | 24
24720 +---------------------------------------+
24721 | Parameter save area (+padding*) (P) | 32
24722 +---------------------------------------+
24723 | Alloca space (A) | 32+P
24724 +---------------------------------------+
24725 | Local variable space (L) | 32+P+A
24726 +---------------------------------------+
24727 | Save area for AltiVec registers (W) | 32+P+A+L
24728 +---------------------------------------+
24729 | AltiVec alignment padding (Y) | 32+P+A+L+W
24730 +---------------------------------------+
24731 | Save area for GP registers (G) | 32+P+A+L+W+Y
24732 +---------------------------------------+
24733 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24734 +---------------------------------------+
24735 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24736 +---------------------------------------+
24738 * If the alloca area is present, the parameter save area is
24739 padded so that the former starts 16-byte aligned.
24741 V.4 stack frames look like:
24743 SP----> +---------------------------------------+
24744 | back chain to caller | 0
24745 +---------------------------------------+
24746 | caller's saved LR | 4
24747 +---------------------------------------+
24748 | Parameter save area (+padding*) (P) | 8
24749 +---------------------------------------+
24750 | Alloca space (A) | 8+P
24751 +---------------------------------------+
24752 | Varargs save area (V) | 8+P+A
24753 +---------------------------------------+
24754 | Local variable space (L) | 8+P+A+V
24755 +---------------------------------------+
24756 | Float/int conversion temporary (X) | 8+P+A+V+L
24757 +---------------------------------------+
24758 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24759 +---------------------------------------+
24760 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24761 +---------------------------------------+
24762 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24763 +---------------------------------------+
24764 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24765 +---------------------------------------+
24766 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24767 +---------------------------------------+
24768 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24769 +---------------------------------------+
24770 old SP->| back chain to caller's caller |
24771 +---------------------------------------+
24773 * If the alloca area is present and the required alignment is
24774 16 bytes, the parameter save area is padded so that the
24775 alloca area starts 16-byte aligned.
24777 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24778 given. (But note below and in sysv4.h that we require only 8 and
24779 may round up the size of our stack frame anyway. The historical
24780 reason is early versions of powerpc-linux which didn't properly
24781 align the stack at program startup. A happy side-effect is that
24782 -mno-eabi libraries can be used with -meabi programs.)
24784 The EABI configuration defaults to the V.4 layout. However,
24785 the stack alignment requirements may differ. If -mno-eabi is not
24786 given, the required stack alignment is 8 bytes; if -mno-eabi is
24787 given, the required alignment is 16 bytes. (But see V.4 comment
24788 above.) */
24790 #ifndef ABI_STACK_BOUNDARY
24791 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24792 #endif
24794 static rs6000_stack_t *
24795 rs6000_stack_info (void)
24797 /* We should never be called for thunks, we are not set up for that. */
24798 gcc_assert (!cfun->is_thunk);
24800 rs6000_stack_t *info = &stack_info;
24801 int reg_size = TARGET_32BIT ? 4 : 8;
24802 int ehrd_size;
24803 int ehcr_size;
24804 int save_align;
24805 int first_gp;
24806 HOST_WIDE_INT non_fixed_size;
24807 bool using_static_chain_p;
24809 if (reload_completed && info->reload_completed)
24810 return info;
24812 memset (info, 0, sizeof (*info));
24813 info->reload_completed = reload_completed;
24815 /* Select which calling sequence. */
24816 info->abi = DEFAULT_ABI;
24818 /* Calculate which registers need to be saved & save area size. */
24819 info->first_gp_reg_save = first_reg_to_save ();
24820 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24821 even if it currently looks like we won't. Reload may need it to
24822 get at a constant; if so, it will have already created a constant
24823 pool entry for it. */
24824 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24825 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24826 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24827 && crtl->uses_const_pool
24828 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24829 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24830 else
24831 first_gp = info->first_gp_reg_save;
24833 info->gp_size = reg_size * (32 - first_gp);
24835 info->first_fp_reg_save = first_fp_reg_to_save ();
24836 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24838 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24839 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24840 - info->first_altivec_reg_save);
24842 /* Does this function call anything? */
24843 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24845 /* Determine if we need to save the condition code registers. */
24846 if (save_reg_p (CR2_REGNO)
24847 || save_reg_p (CR3_REGNO)
24848 || save_reg_p (CR4_REGNO))
24850 info->cr_save_p = 1;
24851 if (DEFAULT_ABI == ABI_V4)
24852 info->cr_size = reg_size;
24855 /* If the current function calls __builtin_eh_return, then we need
24856 to allocate stack space for registers that will hold data for
24857 the exception handler. */
24858 if (crtl->calls_eh_return)
24860 unsigned int i;
24861 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24862 continue;
24864 ehrd_size = i * UNITS_PER_WORD;
24866 else
24867 ehrd_size = 0;
24869 /* In the ELFv2 ABI, we also need to allocate space for separate
24870 CR field save areas if the function calls __builtin_eh_return. */
24871 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24873 /* This hard-codes that we have three call-saved CR fields. */
24874 ehcr_size = 3 * reg_size;
24875 /* We do *not* use the regular CR save mechanism. */
24876 info->cr_save_p = 0;
24878 else
24879 ehcr_size = 0;
24881 /* Determine various sizes. */
24882 info->reg_size = reg_size;
24883 info->fixed_size = RS6000_SAVE_AREA;
24884 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24885 if (cfun->calls_alloca)
24886 info->parm_size =
24887 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24888 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24889 else
24890 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24891 TARGET_ALTIVEC ? 16 : 8);
24892 if (FRAME_GROWS_DOWNWARD)
24893 info->vars_size
24894 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24895 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24896 - (info->fixed_size + info->vars_size + info->parm_size);
24898 if (TARGET_ALTIVEC_ABI)
24899 info->vrsave_mask = compute_vrsave_mask ();
24901 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24902 info->vrsave_size = 4;
24904 compute_save_world_info (info);
24906 /* Calculate the offsets. */
24907 switch (DEFAULT_ABI)
24909 case ABI_NONE:
24910 default:
24911 gcc_unreachable ();
24913 case ABI_AIX:
24914 case ABI_ELFv2:
24915 case ABI_DARWIN:
24916 info->fp_save_offset = -info->fp_size;
24917 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24919 if (TARGET_ALTIVEC_ABI)
24921 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24923 /* Align stack so vector save area is on a quadword boundary.
24924 The padding goes above the vectors. */
24925 if (info->altivec_size != 0)
24926 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24928 info->altivec_save_offset = info->vrsave_save_offset
24929 - info->altivec_padding_size
24930 - info->altivec_size;
24931 gcc_assert (info->altivec_size == 0
24932 || info->altivec_save_offset % 16 == 0);
24934 /* Adjust for AltiVec case. */
24935 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24937 else
24938 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24940 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24941 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24942 info->lr_save_offset = 2*reg_size;
24943 break;
24945 case ABI_V4:
24946 info->fp_save_offset = -info->fp_size;
24947 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24948 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24950 if (TARGET_ALTIVEC_ABI)
24952 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24954 /* Align stack so vector save area is on a quadword boundary. */
24955 if (info->altivec_size != 0)
24956 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24958 info->altivec_save_offset = info->vrsave_save_offset
24959 - info->altivec_padding_size
24960 - info->altivec_size;
24962 /* Adjust for AltiVec case. */
24963 info->ehrd_offset = info->altivec_save_offset;
24965 else
24966 info->ehrd_offset = info->cr_save_offset;
24968 info->ehrd_offset -= ehrd_size;
24969 info->lr_save_offset = reg_size;
24972 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24973 info->save_size = RS6000_ALIGN (info->fp_size
24974 + info->gp_size
24975 + info->altivec_size
24976 + info->altivec_padding_size
24977 + ehrd_size
24978 + ehcr_size
24979 + info->cr_size
24980 + info->vrsave_size,
24981 save_align);
24983 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24985 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24986 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24988 /* Determine if we need to save the link register. */
24989 if (info->calls_p
24990 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24991 && crtl->profile
24992 && !TARGET_PROFILE_KERNEL)
24993 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24994 #ifdef TARGET_RELOCATABLE
24995 || (DEFAULT_ABI == ABI_V4
24996 && (TARGET_RELOCATABLE || flag_pic > 1)
24997 && !constant_pool_empty_p ())
24998 #endif
24999 || rs6000_ra_ever_killed ())
25000 info->lr_save_p = 1;
25002 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
25003 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
25004 && call_used_regs[STATIC_CHAIN_REGNUM]);
25005 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
25007 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
25008 || !(info->savres_strategy & SAVE_INLINE_FPRS)
25009 || !(info->savres_strategy & SAVE_INLINE_VRS)
25010 || !(info->savres_strategy & REST_INLINE_GPRS)
25011 || !(info->savres_strategy & REST_INLINE_FPRS)
25012 || !(info->savres_strategy & REST_INLINE_VRS))
25013 info->lr_save_p = 1;
25015 if (info->lr_save_p)
25016 df_set_regs_ever_live (LR_REGNO, true);
25018 /* Determine if we need to allocate any stack frame:
25020 For AIX we need to push the stack if a frame pointer is needed
25021 (because the stack might be dynamically adjusted), if we are
25022 debugging, if we make calls, or if the sum of fp_save, gp_save,
25023 and local variables are more than the space needed to save all
25024 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
25025 + 18*8 = 288 (GPR13 reserved).
25027 For V.4 we don't have the stack cushion that AIX uses, but assume
25028 that the debugger can handle stackless frames. */
25030 if (info->calls_p)
25031 info->push_p = 1;
25033 else if (DEFAULT_ABI == ABI_V4)
25034 info->push_p = non_fixed_size != 0;
25036 else if (frame_pointer_needed)
25037 info->push_p = 1;
25039 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
25040 info->push_p = 1;
25042 else
25043 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
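/* E.g. a 64-bit AIX leaf function that makes no calls and needs at
   most 288 bytes can use the area below the stack pointer that the
   ABI reserves for register saves, and so needs no stack adjustment
   at all (push_p == 0).  */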
25045 return info;
25048 static void
25049 debug_stack_info (rs6000_stack_t *info)
25051 const char *abi_string;
25053 if (! info)
25054 info = rs6000_stack_info ();
25056 fprintf (stderr, "\nStack information for function %s:\n",
25057 ((current_function_decl && DECL_NAME (current_function_decl))
25058 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
25059 : "<unknown>"));
25061 switch (info->abi)
25063 default: abi_string = "Unknown"; break;
25064 case ABI_NONE: abi_string = "NONE"; break;
25065 case ABI_AIX: abi_string = "AIX"; break;
25066 case ABI_ELFv2: abi_string = "ELFv2"; break;
25067 case ABI_DARWIN: abi_string = "Darwin"; break;
25068 case ABI_V4: abi_string = "V.4"; break;
25071 fprintf (stderr, "\tABI = %5s\n", abi_string);
25073 if (TARGET_ALTIVEC_ABI)
25074 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
25076 if (info->first_gp_reg_save != 32)
25077 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
25079 if (info->first_fp_reg_save != 64)
25080 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
25082 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
25083 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
25084 info->first_altivec_reg_save);
25086 if (info->lr_save_p)
25087 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
25089 if (info->cr_save_p)
25090 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
25092 if (info->vrsave_mask)
25093 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
25095 if (info->push_p)
25096 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
25098 if (info->calls_p)
25099 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
25101 if (info->gp_size)
25102 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
25104 if (info->fp_size)
25105 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
25107 if (info->altivec_size)
25108 fprintf (stderr, "\taltivec_save_offset = %5d\n",
25109 info->altivec_save_offset);
25111 if (info->vrsave_size)
25112 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25113 info->vrsave_save_offset);
25115 if (info->lr_save_p)
25116 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25118 if (info->cr_save_p)
25119 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25121 if (info->varargs_save_offset)
25122 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25124 if (info->total_size)
25125 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25126 info->total_size);
25128 if (info->vars_size)
25129 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25130 info->vars_size);
25132 if (info->parm_size)
25133 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25135 if (info->fixed_size)
25136 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25138 if (info->gp_size)
25139 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25141 if (info->fp_size)
25142 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25144 if (info->altivec_size)
25145 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25147 if (info->vrsave_size)
25148 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25150 if (info->altivec_padding_size)
25151 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25152 info->altivec_padding_size);
25154 if (info->cr_size)
25155 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25157 if (info->save_size)
25158 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25160 if (info->reg_size != 4)
25161 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25163 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25165 fprintf (stderr, "\n");
25168 rtx
25169 rs6000_return_addr (int count, rtx frame)
25171 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25172 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25173 if (count != 0
25174 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25176 cfun->machine->ra_needs_full_frame = 1;
25178 if (count == 0)
25179 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25180 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25181 frame = stack_pointer_rtx;
25182 rtx prev_frame_addr = memory_address (Pmode, frame);
25183 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25184 rtx lr_save_off = plus_constant (Pmode,
25185 prev_frame, RETURN_ADDRESS_OFFSET);
25186 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25187 return gen_rtx_MEM (Pmode, lr_save_addr);
25190 cfun->machine->ra_need_lr = 1;
25191 return get_hard_reg_initial_val (Pmode, LR_REGNO);
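/* Illustrative sketch (editor's note, not from the sources): with the
   logic above, __builtin_return_address (0) normally reduces to the
   incoming LR value, but when the prologue trashes LR (V.4/Darwin PIC)
   or COUNT > 0, the value is instead fetched through the back chain,
   roughly:

     void *frame = *(void **) __builtin_frame_address (0);   /* 0(r1) */
     void *ra = *(void **) ((char *) frame + RETURN_ADDRESS_OFFSET);

   where RETURN_ADDRESS_OFFSET is the ABI-defined LR save slot.  */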
25194 /* Say whether a function is a candidate for sibcall handling or not. */
25196 static bool
25197 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25199 tree fntype;
25201 if (decl)
25202 fntype = TREE_TYPE (decl);
25203 else
25204 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25206 /* We can't do it if the called function has more vector parameters
25207 than the current function; there's nowhere to put the VRsave code. */
25208 if (TARGET_ALTIVEC_ABI
25209 && TARGET_ALTIVEC_VRSAVE
25210 && !(decl && decl == current_function_decl))
25212 function_args_iterator args_iter;
25213 tree type;
25214 int nvreg = 0;
25216 /* Functions with vector parameters are required to have a
25217 prototype, so the argument type info must be available
25218 here. */
25219 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25220 if (TREE_CODE (type) == VECTOR_TYPE
25221 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25222 nvreg++;
25224 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25225 if (TREE_CODE (type) == VECTOR_TYPE
25226 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25227 nvreg--;
25229 if (nvreg > 0)
25230 return false;
25233 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25234 functions, because the callee may have a different TOC pointer from
25235 the caller and there's no way to ensure we restore the TOC when
25236 we return. With the secure-plt SYSV ABI we can't make non-local
25237 calls when compiling -fpic/-fPIC, because the PLT call stubs use r30. */
25238 if (DEFAULT_ABI == ABI_DARWIN
25239 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25240 && decl
25241 && !DECL_EXTERNAL (decl)
25242 && !DECL_WEAK (decl)
25243 && (*targetm.binds_local_p) (decl))
25244 || (DEFAULT_ABI == ABI_V4
25245 && (!TARGET_SECURE_PLT
25246 || !flag_pic
25247 || (decl
25248 && (*targetm.binds_local_p) (decl)))))
25250 tree attr_list = TYPE_ATTRIBUTES (fntype);
25252 if (!lookup_attribute ("longcall", attr_list)
25253 || lookup_attribute ("shortcall", attr_list))
25254 return true;
25257 return false;
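/* Hypothetical example (not from the sources) of how the rules above
   play out:

     extern void far_func (void) __attribute__ ((longcall));
     void caller (void) { far_func (); }

   Even when everything else qualifies, the "longcall" attribute blocks
   the sibcall (unless "shortcall" overrides it), since a long-call
   sequence cannot be emitted as a plain tail branch.  */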
25260 static int
25261 rs6000_ra_ever_killed (void)
25263 rtx_insn *top;
25264 rtx reg;
25265 rtx_insn *insn;
25267 if (cfun->is_thunk)
25268 return 0;
25270 if (cfun->machine->lr_save_state)
25271 return cfun->machine->lr_save_state - 1;
25273 /* regs_ever_live has LR marked as used if any sibcalls are present,
25274 but this should not force saving and restoring in the
25275 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25276 clobbers LR, so that is inappropriate. */
25278 /* Also, the prologue can generate a store into LR that
25279 doesn't really count, like this:
25281 move LR->R0
25282 bcl to set PIC register
25283 move LR->R31
25284 move R0->LR
25286 When we're called from the epilogue, we need to avoid counting
25287 this as a store. */
25289 push_topmost_sequence ();
25290 top = get_insns ();
25291 pop_topmost_sequence ();
25292 reg = gen_rtx_REG (Pmode, LR_REGNO);
25294 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25296 if (INSN_P (insn))
25298 if (CALL_P (insn))
25300 if (!SIBLING_CALL_P (insn))
25301 return 1;
25303 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25304 return 1;
25305 else if (set_of (reg, insn) != NULL_RTX
25306 && !prologue_epilogue_contains (insn))
25307 return 1;
25310 return 0;
25313 /* Emit instructions needed to load the TOC register.
25314 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
25315 there is a constant pool; or for SVR4 -fpic. */
25317 void
25318 rs6000_emit_load_toc_table (int fromprolog)
25320 rtx dest;
25321 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25323 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25325 char buf[30];
25326 rtx lab, tmp1, tmp2, got;
25328 lab = gen_label_rtx ();
25329 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25330 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25331 if (flag_pic == 2)
25333 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25334 need_toc_init = 1;
25336 else
25337 got = rs6000_got_sym ();
25338 tmp1 = tmp2 = dest;
25339 if (!fromprolog)
25341 tmp1 = gen_reg_rtx (Pmode);
25342 tmp2 = gen_reg_rtx (Pmode);
25344 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25345 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25346 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25347 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25349 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25351 emit_insn (gen_load_toc_v4_pic_si ());
25352 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25354 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25356 char buf[30];
25357 rtx temp0 = (fromprolog
25358 ? gen_rtx_REG (Pmode, 0)
25359 : gen_reg_rtx (Pmode));
25361 if (fromprolog)
25363 rtx symF, symL;
25365 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25366 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25368 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25369 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25371 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25372 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25373 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25375 else
25377 rtx tocsym, lab;
25379 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25380 need_toc_init = 1;
25381 lab = gen_label_rtx ();
25382 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25383 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25384 if (TARGET_LINK_STACK)
25385 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25386 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25388 emit_insn (gen_addsi3 (dest, temp0, dest));
25390 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25392 /* This is for AIX code running in non-PIC ELF32. */
25393 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25395 need_toc_init = 1;
25396 emit_insn (gen_elf_high (dest, realsym));
25397 emit_insn (gen_elf_low (dest, dest, realsym));
25399 else
25401 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25403 if (TARGET_32BIT)
25404 emit_insn (gen_load_toc_aix_si (dest));
25405 else
25406 emit_insn (gen_load_toc_aix_di (dest));
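/* For reference (illustrative; the exact output depends on flags): the
   32-bit SVR4 -fPIC -msecure-plt path above typically expands to

       bcl 20,31,.LCF0
   .LCF0:
       mflr 30
       addis 30,30,.LCTOC1-.LCF0@ha
       addi 30,30,.LCTOC1-.LCF0@l

   i.e. the link register captures the label address and the GOT/TOC
   base is formed relative to it.  */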
25410 /* Emit instructions to restore the link register after determining where
25411 its value has been stored. */
25413 void
25414 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25416 rs6000_stack_t *info = rs6000_stack_info ();
25417 rtx operands[2];
25419 operands[0] = source;
25420 operands[1] = scratch;
25422 if (info->lr_save_p)
25424 rtx frame_rtx = stack_pointer_rtx;
25425 HOST_WIDE_INT sp_offset = 0;
25426 rtx tmp;
25428 if (frame_pointer_needed
25429 || cfun->calls_alloca
25430 || info->total_size > 32767)
25432 tmp = gen_frame_mem (Pmode, frame_rtx);
25433 emit_move_insn (operands[1], tmp);
25434 frame_rtx = operands[1];
25436 else if (info->push_p)
25437 sp_offset = info->total_size;
25439 tmp = plus_constant (Pmode, frame_rtx,
25440 info->lr_save_offset + sp_offset);
25441 tmp = gen_frame_mem (Pmode, tmp);
25442 emit_move_insn (tmp, operands[0]);
25444 else
25445 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25447 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25448 state of lr_save_p so any change from here on would be a bug. In
25449 particular, stop rs6000_ra_ever_killed from considering the SET
25450 of lr we may have added just above. */
25451 cfun->machine->lr_save_state = info->lr_save_p + 1;
25454 static GTY(()) alias_set_type set = -1;
25456 alias_set_type
25457 get_TOC_alias_set (void)
25459 if (set == -1)
25460 set = new_alias_set ();
25461 return set;
25464 /* This returns nonzero if the current function uses the TOC. This is
25465 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25466 is generated by the ABI_V4 load_toc_* patterns.
25467 Return 2 instead of 1 if the load_toc_* pattern is in the function
25468 partition that doesn't start the function. */
25469 #if TARGET_ELF
25470 static int
25471 uses_TOC (void)
25473 rtx_insn *insn;
25474 int ret = 1;
25476 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25478 if (INSN_P (insn))
25480 rtx pat = PATTERN (insn);
25481 int i;
25483 if (GET_CODE (pat) == PARALLEL)
25484 for (i = 0; i < XVECLEN (pat, 0); i++)
25486 rtx sub = XVECEXP (pat, 0, i);
25487 if (GET_CODE (sub) == USE)
25489 sub = XEXP (sub, 0);
25490 if (GET_CODE (sub) == UNSPEC
25491 && XINT (sub, 1) == UNSPEC_TOC)
25492 return ret;
25496 else if (crtl->has_bb_partition
25497 && NOTE_P (insn)
25498 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25499 ret = 2;
25501 return 0;
25503 #endif
25505 rtx
25506 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25508 rtx tocrel, tocreg, hi;
25510 if (TARGET_DEBUG_ADDR)
25512 if (GET_CODE (symbol) == SYMBOL_REF)
25513 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25514 XSTR (symbol, 0));
25515 else
25517 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25518 GET_RTX_NAME (GET_CODE (symbol)));
25519 debug_rtx (symbol);
25523 if (!can_create_pseudo_p ())
25524 df_set_regs_ever_live (TOC_REGISTER, true);
25526 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25527 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25528 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25529 return tocrel;
25531 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25532 if (largetoc_reg != NULL)
25534 emit_move_insn (largetoc_reg, hi);
25535 hi = largetoc_reg;
25537 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
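/* For reference (illustrative): the UNSPEC_TOCREL built here usually
   ends up as a TOC-relative access such as

       addis 9,2,sym@toc@ha
       ld 9,sym@toc@l(9)

   for the medium/large code models, while with CMODEL_SMALL the plain
   (unspec ... UNSPEC_TOCREL) is reachable as a single sym@toc(2)
   reference.  */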
25540 /* Issue assembly directives that create a reference to the given DWARF
25541 FRAME_TABLE_LABEL from the current function section. */
25542 void
25543 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25545 fprintf (asm_out_file, "\t.ref %s\n",
25546 (* targetm.strip_name_encoding) (frame_table_label));
25549 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25550 and the change to the stack pointer. */
25552 static void
25553 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25555 rtvec p;
25556 int i;
25557 rtx regs[3];
25559 i = 0;
25560 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25561 if (hard_frame_needed)
25562 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25563 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25564 || (hard_frame_needed
25565 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25566 regs[i++] = fp;
25568 p = rtvec_alloc (i);
25569 while (--i >= 0)
25571 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25572 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25575 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25578 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25579 and set the appropriate attributes for the generated insn. Return the
25580 first insn which adjusts the stack pointer or the last insn before
25581 the stack adjustment loop.
25583 SIZE_INT is used to create the CFI note for the allocation.
25585 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25586 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25588 ORIG_SP contains the backchain value that must be stored at *sp. */
25590 static rtx_insn *
25591 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25593 rtx_insn *insn;
25595 rtx size_rtx = GEN_INT (-size_int);
25596 if (size_int > 32767)
25598 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25599 /* Need a note here so that try_split doesn't get confused. */
25600 if (get_last_insn () == NULL_RTX)
25601 emit_note (NOTE_INSN_DELETED);
25602 insn = emit_move_insn (tmp_reg, size_rtx);
25603 try_split (PATTERN (insn), insn, 0);
25604 size_rtx = tmp_reg;
25607 if (Pmode == SImode)
25608 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25609 stack_pointer_rtx,
25610 size_rtx,
25611 orig_sp));
25612 else
25613 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25614 stack_pointer_rtx,
25615 size_rtx,
25616 orig_sp));
25617 rtx par = PATTERN (insn);
25618 gcc_assert (GET_CODE (par) == PARALLEL);
25619 rtx set = XVECEXP (par, 0, 0);
25620 gcc_assert (GET_CODE (set) == SET);
25621 rtx mem = SET_DEST (set);
25622 gcc_assert (MEM_P (mem));
25623 MEM_NOTRAP_P (mem) = 1;
25624 set_mem_alias_set (mem, get_frame_alias_set ());
25626 RTX_FRAME_RELATED_P (insn) = 1;
25627 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25628 gen_rtx_SET (stack_pointer_rtx,
25629 gen_rtx_PLUS (Pmode,
25630 stack_pointer_rtx,
25631 GEN_INT (-size_int))));
25633 /* Emit a blockage to ensure the allocation/probing insns are
25634 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25635 note for similar reasons. */
25636 if (flag_stack_clash_protection)
25638 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25639 emit_insn (gen_blockage ());
25642 return insn;
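/* As a concrete illustration (sizes are hypothetical): a small 64-bit
   allocation through the routine above becomes a single

       stdu 1,-144(1)

   which atomically decrements r1 and stores the back chain; for sizes
   above 32767 the negated size is first loaded into r0 and used as the
   update operand.  */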
25645 static HOST_WIDE_INT
25646 get_stack_clash_protection_probe_interval (void)
25648 return (HOST_WIDE_INT_1U
25649 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25652 static HOST_WIDE_INT
25653 get_stack_clash_protection_guard_size (void)
25655 return (HOST_WIDE_INT_1U
25656 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
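/* Note: with the default value of 12 for both --param
   stack-clash-protection-probe-interval and --param
   stack-clash-protection-guard-size, each of the above evaluates to
   4 KiB (1 << 12).  */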
25659 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25660 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25662 COPY_REG, if non-null, should contain a copy of the original
25663 stack pointer at exit from this function.
25665 This is subtly different from the Ada probing in that it tries hard to
25666 prevent attacks that jump the stack guard. Thus it is never allowed to
25667 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25668 space without a suitable probe. */
25669 static rtx_insn *
25670 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25671 rtx copy_reg)
25673 rtx orig_sp = copy_reg;
25675 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25677 /* Round the size down to a multiple of PROBE_INTERVAL. */
25678 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25680 /* If explicitly requested,
25681 or the rounded size is not the same as the original size
25682 or the rounded size is greater than a page,
25683 then we will need a copy of the original stack pointer. */
25684 if (rounded_size != orig_size
25685 || rounded_size > probe_interval
25686 || copy_reg)
25688 /* If the caller did not request a copy of the incoming stack
25689 pointer, then we use r0 to hold the copy. */
25690 if (!copy_reg)
25691 orig_sp = gen_rtx_REG (Pmode, 0);
25692 emit_move_insn (orig_sp, stack_pointer_rtx);
25695 /* There are three cases here.
25697 One is a single probe which is the most common and most efficiently
25698 implemented as it does not have to have a copy of the original
25699 stack pointer if there are no residuals.
25701 Second is unrolled allocation/probes which we use if there are just
25702 a few of them. It needs to save the original stack pointer into a
25703 temporary for use as a source register in the allocation/probe.
25705 Last is a loop. This is the most uncommon case and least efficient. */
25706 rtx_insn *retval = NULL;
25707 if (rounded_size == probe_interval)
25709 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25711 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25713 else if (rounded_size <= 8 * probe_interval)
25715 /* The ABI requires using the store with update insns to allocate
25716 space and store the backchain into the stack.
25718 So we save the current stack pointer into a temporary, then
25719 emit the store-with-update insns to store the saved stack pointer
25720 into the right location in each new page. */
25721 for (int i = 0; i < rounded_size; i += probe_interval)
25723 rtx_insn *insn
25724 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25726 /* Save the first stack adjustment in RETVAL. */
25727 if (i == 0)
25728 retval = insn;
25731 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25733 else
25735 /* Compute the ending address. */
25736 rtx end_addr
25737 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25738 rtx rs = GEN_INT (-rounded_size);
25739 rtx_insn *insn;
25740 if (add_operand (rs, Pmode))
25741 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25742 else
25744 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25745 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25746 stack_pointer_rtx));
25747 /* Describe the effect of INSN to the CFI engine. */
25748 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25749 gen_rtx_SET (end_addr,
25750 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25751 rs)));
25753 RTX_FRAME_RELATED_P (insn) = 1;
25755 /* Emit the loop. */
25756 if (TARGET_64BIT)
25757 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25758 stack_pointer_rtx, orig_sp,
25759 end_addr));
25760 else
25761 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25762 stack_pointer_rtx, orig_sp,
25763 end_addr));
25764 RTX_FRAME_RELATED_P (retval) = 1;
25765 /* Describe the effect of INSN to the CFI engine. */
25766 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25767 gen_rtx_SET (stack_pointer_rtx, end_addr));
25769 /* Emit a blockage to ensure the allocation/probing insns are
25770 not optimized, combined, removed, etc. Other cases handle this
25771 within their call to rs6000_emit_allocate_stack_1. */
25772 emit_insn (gen_blockage ());
25774 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25777 if (orig_size != rounded_size)
25779 /* Allocate (and implicitly probe) any residual space. */
25780 HOST_WIDE_INT residual = orig_size - rounded_size;
25782 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25784 /* If the residual was the only allocation, then we can return the
25785 allocating insn. */
25786 if (!retval)
25787 retval = insn;
25790 return retval;
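/* Worked example (hypothetical numbers, assuming a 4 KiB probe
   interval): a request for 9000 bytes rounds down to 8192, so the
   unrolled case emits two 4096-byte store-with-update allocations, and
   the remaining 808 bytes are allocated (and thereby probed) by the
   residual store at the end.  */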
25793 /* Emit the correct code for allocating stack space, as insns.
25794 If COPY_REG, make sure a copy of the old frame is left there.
25795 The generated code may use hard register 0 as a temporary. */
25797 static rtx_insn *
25798 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25800 rtx_insn *insn;
25801 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25802 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25803 rtx todec = gen_int_mode (-size, Pmode);
25805 if (INTVAL (todec) != -size)
25807 warning (0, "stack frame too large");
25808 emit_insn (gen_trap ());
25809 return 0;
25812 if (crtl->limit_stack)
25814 if (REG_P (stack_limit_rtx)
25815 && REGNO (stack_limit_rtx) > 1
25816 && REGNO (stack_limit_rtx) <= 31)
25818 rtx_insn *insn
25819 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25820 gcc_assert (insn);
25821 emit_insn (insn);
25822 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25824 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25825 && TARGET_32BIT
25826 && DEFAULT_ABI == ABI_V4
25827 && !flag_pic)
25829 rtx toload = gen_rtx_CONST (VOIDmode,
25830 gen_rtx_PLUS (Pmode,
25831 stack_limit_rtx,
25832 GEN_INT (size)));
25834 emit_insn (gen_elf_high (tmp_reg, toload));
25835 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25836 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25837 const0_rtx));
25839 else
25840 warning (0, "stack limit expression is not supported");
25843 if (flag_stack_clash_protection)
25845 if (size < get_stack_clash_protection_guard_size ())
25846 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25847 else
25849 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25850 copy_reg);
25852 /* If we asked for a copy with an offset, then we still need to add
25853 in the offset. */
25854 if (copy_reg && copy_off)
25855 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25856 return insn;
25860 if (copy_reg)
25862 if (copy_off != 0)
25863 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25864 else
25865 emit_move_insn (copy_reg, stack_reg);
25868 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25869 it now and set the alias set/attributes. The above gen_*_update
25870 calls will generate a PARALLEL with the MEM set being the first
25871 operation. */
25872 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25873 return insn;
25876 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25878 #if PROBE_INTERVAL > 32768
25879 #error Cannot use indexed addressing mode for stack probing
25880 #endif
25882 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25883 inclusive. These are offsets from the current stack pointer. */
25885 static void
25886 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25888 /* See if we have a constant small number of probes to generate. If so,
25889 that's the easy case. */
25890 if (first + size <= 32768)
25892 HOST_WIDE_INT i;
25894 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25895 it exceeds SIZE. If only one probe is needed, this will not
25896 generate any code. Then probe at FIRST + SIZE. */
25897 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25898 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25899 -(first + i)));
25901 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25902 -(first + size)));
25905 /* Otherwise, do the same as above, but in a loop. Note that we must be
25906 extra careful with variables wrapping around because we might be at
25907 the very top (or the very bottom) of the address space and we have
25908 to be able to handle this case properly; in particular, we use an
25909 equality test for the loop condition. */
25910 else
25912 HOST_WIDE_INT rounded_size;
25913 rtx r12 = gen_rtx_REG (Pmode, 12);
25914 rtx r0 = gen_rtx_REG (Pmode, 0);
25916 /* Sanity check for the addressing mode we're going to use. */
25917 gcc_assert (first <= 32768);
25919 /* Step 1: round SIZE to the previous multiple of the interval. */
25921 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25924 /* Step 2: compute initial and final value of the loop counter. */
25926 /* TEST_ADDR = SP + FIRST. */
25927 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25928 -first)));
25930 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25931 if (rounded_size > 32768)
25933 emit_move_insn (r0, GEN_INT (-rounded_size));
25934 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25936 else
25937 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25938 -rounded_size)));
25941 /* Step 3: the loop
25945 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25946 probe at TEST_ADDR
25948 while (TEST_ADDR != LAST_ADDR)
25950 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25951 until it is equal to ROUNDED_SIZE. */
25953 if (TARGET_64BIT)
25954 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25955 else
25956 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25959 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25960 that SIZE is equal to ROUNDED_SIZE. */
25962 if (size != rounded_size)
25963 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
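/* Example (hypothetical numbers, assuming a 4 KiB PROBE_INTERVAL):
   with FIRST == 28672 and SIZE == 8192, FIRST + SIZE exceeds 32768, so
   the loop form is used: r12 starts at sp - 28672, r0 holds the bound
   sp - 36864, and probes land at sp - 32768 and sp - 36864.  */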
25967 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25968 addresses, not offsets. */
25970 static const char *
25971 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25973 static int labelno = 0;
25974 char loop_lab[32];
25975 rtx xops[2];
25977 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25979 /* Loop. */
25980 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25982 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25983 xops[0] = reg1;
25984 xops[1] = GEN_INT (-PROBE_INTERVAL);
25985 output_asm_insn ("addi %0,%0,%1", xops);
25987 /* Probe at TEST_ADDR. */
25988 xops[1] = gen_rtx_REG (Pmode, 0);
25989 output_asm_insn ("stw %1,0(%0)", xops);
25991 /* Test if TEST_ADDR == LAST_ADDR. */
25992 xops[1] = reg2;
25993 if (TARGET_64BIT)
25994 output_asm_insn ("cmpd 0,%0,%1", xops);
25995 else
25996 output_asm_insn ("cmpw 0,%0,%1", xops);
25998 /* Branch. */
25999 fputs ("\tbne 0,", asm_out_file);
26000 assemble_name_raw (asm_out_file, loop_lab);
26001 fputc ('\n', asm_out_file);
26003 return "";
26006 /* This function is called when rs6000_frame_related is processing
26007 SETs within a PARALLEL, and returns whether the REGNO save ought to
26008 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
26009 for out-of-line register save functions, store multiple, and the
26010 Darwin world_save. They may contain registers that don't really
26011 need saving. */
26013 static bool
26014 interesting_frame_related_regno (unsigned int regno)
26016 /* Apparent saves of r0 are actually saves of LR. It doesn't make
26017 sense to substitute the regno here to test save_reg_p (LR_REGNO).
26018 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
26019 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
26020 as frame related. */
26021 if (regno == 0)
26022 return true;
26023 /* If we see CR2 then we are here on a Darwin world save. Saves of
26024 CR2 signify the whole CR is being saved. This is a long-standing
26025 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
26026 that CR needs to be saved. */
26027 if (regno == CR2_REGNO)
26028 return true;
26029 /* Omit frame info for any user-defined global regs. If frame info
26030 is supplied for them, frame unwinding will restore a user reg.
26031 Also omit frame info for any reg we don't need to save, as that
26032 bloats frame info and can cause problems with shrink wrapping.
26033 Since global regs won't be seen as needing to be saved, both of
26034 these conditions are covered by save_reg_p. */
26035 return save_reg_p (regno);
26038 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
26039 addresses, not offsets.
26041 REG2 contains the backchain that must be stored into *sp at each allocation.
26043 This is subtly different from the Ada probing above in that it tries hard
26044 to prevent attacks that jump the stack guard. Thus, it is never allowed
26045 to allocate more than PROBE_INTERVAL bytes of stack space without a
26046 suitable probe. */
26048 static const char *
26049 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
26051 static int labelno = 0;
26052 char loop_lab[32];
26053 rtx xops[3];
26055 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
26057 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
26059 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
26061 /* This allocates and probes. */
26062 xops[0] = reg1;
26063 xops[1] = reg2;
26064 xops[2] = GEN_INT (-probe_interval);
26065 if (TARGET_64BIT)
26066 output_asm_insn ("stdu %1,%2(%0)", xops);
26067 else
26068 output_asm_insn ("stwu %1,%2(%0)", xops);
26070 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
26071 xops[0] = reg1;
26072 xops[1] = reg3;
26073 if (TARGET_64BIT)
26074 output_asm_insn ("cmpd 0,%0,%1", xops);
26075 else
26076 output_asm_insn ("cmpw 0,%0,%1", xops);
26078 fputs ("\tbne 0,", asm_out_file);
26079 assemble_name_raw (asm_out_file, loop_lab);
26080 fputc ('\n', asm_out_file);
26082 return "";
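/* Schematically, the emitted 64-bit loop looks like this (register
   numbers are illustrative; -4096 stands for the configured interval):

   .LPSRL0:
       stdu 0,-4096(1)     # allocate one interval, storing the backchain
       cmpd 0,1,12         # reached LAST_ADDR yet?
       bne 0,.LPSRL0  */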
26085 /* Wrapper around the output_probe_stack_range routines. */
26086 const char *
26087 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
26089 if (flag_stack_clash_protection)
26090 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
26091 else
26092 return output_probe_stack_range_1 (reg1, reg3);
26095 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
26096 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
26097 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
26098 deduce these equivalences by itself so it wasn't necessary to hold
26099 its hand so much. Don't be tempted to always supply d2_f_d_e with
26100 the actual CFA register, i.e. r31 when we are using a hard frame
26101 pointer. That fails when saving regs off r1, and sched moves the
26102 r31 setup past the reg saves. */
26104 static rtx_insn *
26105 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
26106 rtx reg2, rtx repl2)
26108 rtx repl;
26110 if (REGNO (reg) == STACK_POINTER_REGNUM)
26112 gcc_checking_assert (val == 0);
26113 repl = NULL_RTX;
26115 else
26116 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26117 GEN_INT (val));
26119 rtx pat = PATTERN (insn);
26120 if (!repl && !reg2)
26122 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26123 if (GET_CODE (pat) == PARALLEL)
26124 for (int i = 0; i < XVECLEN (pat, 0); i++)
26125 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26127 rtx set = XVECEXP (pat, 0, i);
26129 if (!REG_P (SET_SRC (set))
26130 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26131 RTX_FRAME_RELATED_P (set) = 1;
26133 RTX_FRAME_RELATED_P (insn) = 1;
26134 return insn;
26137 /* We expect that 'pat' is either a SET or a PARALLEL containing
26138 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26139 are important so they all have to be marked RTX_FRAME_RELATED_P.
26140 Call simplify_replace_rtx on the SETs rather than the whole insn
26141 so as to leave the other stuff alone (for example USE of r12). */
26143 set_used_flags (pat);
26144 if (GET_CODE (pat) == SET)
26146 if (repl)
26147 pat = simplify_replace_rtx (pat, reg, repl);
26148 if (reg2)
26149 pat = simplify_replace_rtx (pat, reg2, repl2);
26151 else if (GET_CODE (pat) == PARALLEL)
26153 pat = shallow_copy_rtx (pat);
26154 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26156 for (int i = 0; i < XVECLEN (pat, 0); i++)
26157 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26159 rtx set = XVECEXP (pat, 0, i);
26161 if (repl)
26162 set = simplify_replace_rtx (set, reg, repl);
26163 if (reg2)
26164 set = simplify_replace_rtx (set, reg2, repl2);
26165 XVECEXP (pat, 0, i) = set;
26167 if (!REG_P (SET_SRC (set))
26168 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26169 RTX_FRAME_RELATED_P (set) = 1;
26172 else
26173 gcc_unreachable ();
26175 RTX_FRAME_RELATED_P (insn) = 1;
26176 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26178 return insn;
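/* For instance, after r31 has been saved through a frame register that
   has already been decremented, the REG_FRAME_RELATED_EXPR note added
   above re-expresses the store as an offset from (plus (reg 1) VAL), so
   the CFI engine records it relative to the incoming stack pointer
   rather than the temporary base.  */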
26181 /* Returns an insn that has a vrsave set operation with the
26182 appropriate CLOBBERs. */
26184 static rtx
26185 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26187 int nclobs, i;
26188 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26189 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26191 clobs[0]
26192 = gen_rtx_SET (vrsave,
26193 gen_rtx_UNSPEC_VOLATILE (SImode,
26194 gen_rtvec (2, reg, vrsave),
26195 UNSPECV_SET_VRSAVE));
26197 nclobs = 1;
26199 /* We need to clobber the registers in the mask so the scheduler
26200 does not move sets to VRSAVE before sets of AltiVec registers.
26202 However, if the function receives nonlocal gotos, reload will set
26203 all call saved registers live. We will end up with:
26205 (set (reg 999) (mem))
26206 (parallel [ (set (reg vrsave) (unspec blah))
26207 (clobber (reg 999))])
26209 The clobber will cause the store into reg 999 to be dead, and
26210 flow will attempt to delete an epilogue insn. In this case, we
26211 need an unspec use/set of the register. */
26213 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26214 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26216 if (!epiloguep || call_used_regs [i])
26217 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
26218 gen_rtx_REG (V4SImode, i));
26219 else
26221 rtx reg = gen_rtx_REG (V4SImode, i);
26223 clobs[nclobs++]
26224 = gen_rtx_SET (reg,
26225 gen_rtx_UNSPEC (V4SImode,
26226 gen_rtvec (1, reg), 27));
26230 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26232 for (i = 0; i < nclobs; ++i)
26233 XVECEXP (insn, 0, i) = clobs[i];
26235 return insn;
26238 static rtx
26239 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26241 rtx addr, mem;
26243 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26244 mem = gen_frame_mem (GET_MODE (reg), addr);
26245 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26248 static rtx
26249 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26251 return gen_frame_set (reg, frame_reg, offset, false);
26254 static rtx
26255 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26257 return gen_frame_set (reg, frame_reg, offset, true);
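/* Usage sketch (editor's note): on a 64-bit target,
   gen_frame_store (gen_rtx_REG (Pmode, 31), sp_reg, 8) yields roughly

     (set (mem/c:DI (plus:DI (reg:DI 1) (const_int 8))) (reg:DI 31))

   and gen_frame_load produces the mirror-image SET.  */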
26260 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26261 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26263 static rtx_insn *
26264 emit_frame_save (rtx frame_reg, machine_mode mode,
26265 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26267 rtx reg;
26269 /* Some cases that need register indexed addressing. */
26270 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26271 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26273 reg = gen_rtx_REG (mode, regno);
26274 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26275 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26276 NULL_RTX, NULL_RTX);
26279 /* Emit an offset memory reference suitable for a frame store, while
26280 converting to a valid addressing mode. */
26282 static rtx
26283 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26285 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26288 #ifndef TARGET_FIX_AND_CONTINUE
26289 #define TARGET_FIX_AND_CONTINUE 0
26290 #endif
26292 /* It's really GPR 13 or 14, FPR 14, and VR 20; we need the smallest of these. */
26293 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26294 #define LAST_SAVRES_REGISTER 31
26295 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26297 enum {
26298 SAVRES_LR = 0x1,
26299 SAVRES_SAVE = 0x2,
26300 SAVRES_REG = 0x0c,
26301 SAVRES_GPR = 0,
26302 SAVRES_FPR = 4,
26303 SAVRES_VR = 8
26306 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26308 /* Temporary holding space for an out-of-line register save/restore
26309 routine name. */
26310 static char savres_routine_name[30];
26312 /* Return the name for an out-of-line register save/restore routine.
26313 SEL encodes which register class we handle and whether we save or restore. */
26315 static char *
26316 rs6000_savres_routine_name (int regno, int sel)
26318 const char *prefix = "";
26319 const char *suffix = "";
26321 /* Different targets are supposed to define
26322 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26323 routine name could be defined with:
26325 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26327 This is a nice idea in theory, but in practice, things are
26328 complicated in several ways:
26330 - ELF targets have save/restore routines for GPRs.
26332 - PPC64 ELF targets have routines for save/restore of GPRs that
26333 differ in what they do with the link register, so having a set
26334 prefix doesn't work. (We only use one of the save routines at
26335 the moment, though.)
26337 - PPC32 ELF targets have "exit" versions of the restore routines
26338 that restore the link register and can save some extra space.
26339 These require an extra suffix. (There are also "tail" versions
26340 of the restore routines and "GOT" versions of the save routines,
26341 but we don't generate those at present. Same problems apply,
26342 though.)
26344 We deal with all this by synthesizing our own prefix/suffix and
26345 using that for the simple sprintf call shown above. */
26346 if (DEFAULT_ABI == ABI_V4)
26348 if (TARGET_64BIT)
26349 goto aix_names;
26351 if ((sel & SAVRES_REG) == SAVRES_GPR)
26352 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26353 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26354 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26355 else if ((sel & SAVRES_REG) == SAVRES_VR)
26356 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26357 else
26358 abort ();
26360 if ((sel & SAVRES_LR))
26361 suffix = "_x";
26363 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26365 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26366 /* No out-of-line save/restore routines for GPRs on AIX. */
26367 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26368 #endif
26370 aix_names:
26371 if ((sel & SAVRES_REG) == SAVRES_GPR)
26372 prefix = ((sel & SAVRES_SAVE)
26373 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26374 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26375 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26377 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26378 if ((sel & SAVRES_LR))
26379 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26380 else
26381 #endif
26383 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26384 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26387 else if ((sel & SAVRES_REG) == SAVRES_VR)
26388 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26389 else
26390 abort ();
26393 if (DEFAULT_ABI == ABI_DARWIN)
26395 /* The Darwin approach is (slightly) different, in order to be
26396 compatible with code generated by the system toolchain. There is a
26397 single symbol for the start of save sequence, and the code here
26398 embeds an offset into that code on the basis of the first register
26399 to be saved. */
26400 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26401 if ((sel & SAVRES_REG) == SAVRES_GPR)
26402 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26403 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26404 (regno - 13) * 4, prefix, regno);
26405 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26406 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26407 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26408 else if ((sel & SAVRES_REG) == SAVRES_VR)
26409 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26410 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26411 else
26412 abort ();
26414 else
26415 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26417 return savres_routine_name;
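/* A few sample results (illustrative): a 32-bit SVR4 GPR save from r29
   that also handles LR yields "_savegpr_29_x"; the AIX/ELFv2 GPR
   restore from r28 with LR gives "_restgpr0_28"; and the Darwin form
   for the same kind of save is "*saveGPR+64 ; save r29-r31".  */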
26420 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26421 SEL encodes which register class we handle and whether we save or restore. */
26423 static rtx
26424 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26426 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26427 ? info->first_gp_reg_save
26428 : (sel & SAVRES_REG) == SAVRES_FPR
26429 ? info->first_fp_reg_save - 32
26430 : (sel & SAVRES_REG) == SAVRES_VR
26431 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26432 : -1);
26433 rtx sym;
26434 int select = sel;
26436 /* Don't generate bogus routine names. */
26437 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26438 && regno <= LAST_SAVRES_REGISTER
26439 && select >= 0 && select <= 12);
26441 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26443 if (sym == NULL)
26445 char *name;
26447 name = rs6000_savres_routine_name (regno, sel);
26449 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26450 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26451 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26454 return sym;
26457 /* Emit a sequence of insns, including a stack tie if needed, for
26458 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26459 reset the stack pointer, but move the base of the frame into
26460 reg UPDT_REGNO for use by out-of-line register restore routines. */
26462 static rtx
26463 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26464 unsigned updt_regno)
26466 /* If there is nothing to do, don't do anything. */
26467 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26468 return NULL_RTX;
26470 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26472 /* This blockage is needed so that sched doesn't decide to move
26473 the sp change before the register restores. */
26474 if (DEFAULT_ABI == ABI_V4)
26475 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26476 GEN_INT (frame_off)));
26478 /* If we are restoring registers out-of-line, we will be using the
26479 "exit" variants of the restore routines, which will reset the
26480 stack for us. But we do need to point updt_reg into the
26481 right place for those routines. */
26482 if (frame_off != 0)
26483 return emit_insn (gen_add3_insn (updt_reg_rtx,
26484 frame_reg_rtx, GEN_INT (frame_off)));
26485 else
26486 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26488 return NULL_RTX;
26491 /* Return the register number used as a pointer by out-of-line
26492 save/restore functions. */
26494 static inline unsigned
26495 ptr_regno_for_savres (int sel)
26497 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26498 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26499 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
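/* Summarizing the conditionals above: AIX/ELFv2 use r1 for the FPR
   routines and for any LR-handling variant, and r12 otherwise; Darwin
   uses r1 for FPRs and r11 otherwise; the remaining ABIs use r11.  */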
26502 /* Construct a parallel rtx describing the effect of a call to an
26503 out-of-line register save/restore routine, and emit the insn
26504 or jump_insn as appropriate. */
26506 static rtx_insn *
26507 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26508 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26509 machine_mode reg_mode, int sel)
26511 int i;
26512 int offset, start_reg, end_reg, n_regs, use_reg;
26513 int reg_size = GET_MODE_SIZE (reg_mode);
26514 rtx sym;
26515 rtvec p;
26516 rtx par;
26517 rtx_insn *insn;
26519 offset = 0;
26520 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26521 ? info->first_gp_reg_save
26522 : (sel & SAVRES_REG) == SAVRES_FPR
26523 ? info->first_fp_reg_save
26524 : (sel & SAVRES_REG) == SAVRES_VR
26525 ? info->first_altivec_reg_save
26526 : -1);
26527 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26528 ? 32
26529 : (sel & SAVRES_REG) == SAVRES_FPR
26530 ? 64
26531 : (sel & SAVRES_REG) == SAVRES_VR
26532 ? LAST_ALTIVEC_REGNO + 1
26533 : -1);
26534 n_regs = end_reg - start_reg;
26535 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26536 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26537 + n_regs);
26539 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26540 RTVEC_ELT (p, offset++) = ret_rtx;
26542 RTVEC_ELT (p, offset++)
26543 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26545 sym = rs6000_savres_routine_sym (info, sel);
26546 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26548 use_reg = ptr_regno_for_savres (sel);
26549 if ((sel & SAVRES_REG) == SAVRES_VR)
26551 /* Vector regs are saved/restored using [reg+reg] addressing. */
26552 RTVEC_ELT (p, offset++)
26553 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26554 RTVEC_ELT (p, offset++)
26555 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26557 else
26558 RTVEC_ELT (p, offset++)
26559 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26561 for (i = 0; i < end_reg - start_reg; i++)
26562 RTVEC_ELT (p, i + offset)
26563 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26564 frame_reg_rtx, save_area_offset + reg_size * i,
26565 (sel & SAVRES_SAVE) != 0);
26567 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26568 RTVEC_ELT (p, i + offset)
26569 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26571 par = gen_rtx_PARALLEL (VOIDmode, p);
26573 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26575 insn = emit_jump_insn (par);
26576 JUMP_LABEL (insn) = ret_rtx;
26578 else
26579 insn = emit_insn (par);
26580 return insn;
26583 /* Emit prologue code to store CR fields that need to be saved into REG. This
26584 function should only be called when moving the non-volatile CRs to REG, it
26585 is not a general purpose routine to move the entire set of CRs to REG.
26586 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26587 volatile CRs. */
26589 static void
26590 rs6000_emit_prologue_move_from_cr (rtx reg)
26592 /* Only the ELFv2 ABI allows storing only selected fields. */
26593 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26595 int i, cr_reg[8], count = 0;
26597 /* Collect CR fields that must be saved. */
26598 for (i = 0; i < 8; i++)
26599 if (save_reg_p (CR0_REGNO + i))
26600 cr_reg[count++] = i;
26602 /* If it's just a single one, use mfcrf. */
26603 if (count == 1)
26605 rtvec p = rtvec_alloc (1);
26606 rtvec r = rtvec_alloc (2);
26607 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26608 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26609 RTVEC_ELT (p, 0)
26610 = gen_rtx_SET (reg,
26611 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26613 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26614 return;
26617 /* ??? It might be better to handle count == 2 / 3 cases here
26618 as well, using logical operations to combine the values. */
26621 emit_insn (gen_prologue_movesi_from_cr (reg));
26624 /* Return whether the split-stack arg pointer (r12) is used. */
26626 static bool
26627 split_stack_arg_pointer_used_p (void)
26629 /* If the pseudo holding the arg pointer is no longer a pseudo,
26630 then the arg pointer is used. */
26631 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26632 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26633 || (REGNO (cfun->machine->split_stack_arg_pointer)
26634 < FIRST_PSEUDO_REGISTER)))
26635 return true;
26637 /* Unfortunately we also need to do some code scanning, since
26638 r12 may have been substituted for the pseudo. */
26639 rtx_insn *insn;
26640 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26641 FOR_BB_INSNS (bb, insn)
26642 if (NONDEBUG_INSN_P (insn))
26644 /* A call destroys r12. */
26645 if (CALL_P (insn))
26646 return false;
26648 df_ref use;
26649 FOR_EACH_INSN_USE (use, insn)
26651 rtx x = DF_REF_REG (use);
26652 if (REG_P (x) && REGNO (x) == 12)
26653 return true;
26655 df_ref def;
26656 FOR_EACH_INSN_DEF (def, insn)
26658 rtx x = DF_REF_REG (def);
26659 if (REG_P (x) && REGNO (x) == 12)
26660 return false;
26663 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26666 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26668 static bool
26669 rs6000_global_entry_point_needed_p (void)
26671 /* Only needed for the ELFv2 ABI. */
26672 if (DEFAULT_ABI != ABI_ELFv2)
26673 return false;
26675 /* With -msingle-pic-base, we assume the whole program shares the same
26676 TOC, so no global entry point prologues are needed anywhere. */
26677 if (TARGET_SINGLE_PIC_BASE)
26678 return false;
26680 /* Ensure we have a global entry point for thunks. ??? We could
26681 avoid that if the target routine doesn't need a global entry point,
26682 but we do not know whether this is the case at this point. */
26683 if (cfun->is_thunk)
26684 return true;
26686 /* For regular functions, rs6000_emit_prologue sets this flag if the
26687 routine ever uses the TOC pointer. */
26688 return cfun->machine->r2_setup_needed;
26691 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26692 static sbitmap
26693 rs6000_get_separate_components (void)
26695 rs6000_stack_t *info = rs6000_stack_info ();
26697 if (WORLD_SAVE_P (info))
26698 return NULL;
26700 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26701 && !(info->savres_strategy & REST_MULTIPLE));
26703 /* Component 0 is the save/restore of LR (done via GPR0).
26704 Component 2 is the save of the TOC (GPR2).
26705 Components 13..31 are the save/restore of GPR13..GPR31.
26706 Components 46..63 are the save/restore of FPR14..FPR31. */
26708 cfun->machine->n_components = 64;
26710 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26711 bitmap_clear (components);
26713 int reg_size = TARGET_32BIT ? 4 : 8;
26714 int fp_reg_size = 8;
26716 /* The GPRs we need saved to the frame. */
26717 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26718 && (info->savres_strategy & REST_INLINE_GPRS))
26720 int offset = info->gp_save_offset;
26721 if (info->push_p)
26722 offset += info->total_size;
26724 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26726 if (IN_RANGE (offset, -0x8000, 0x7fff)
26727 && save_reg_p (regno))
26728 bitmap_set_bit (components, regno);
26730 offset += reg_size;
26734 /* Don't mess with the hard frame pointer. */
26735 if (frame_pointer_needed)
26736 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26738 /* Don't mess with the fixed TOC register. */
26739 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26740 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26741 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26742 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26744 /* The FPRs we need saved to the frame. */
26745 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26746 && (info->savres_strategy & REST_INLINE_FPRS))
26748 int offset = info->fp_save_offset;
26749 if (info->push_p)
26750 offset += info->total_size;
26752 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26754 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26755 bitmap_set_bit (components, regno);
26757 offset += fp_reg_size;
26761 /* Optimize LR save and restore if we can. This is component 0. Any
26762 out-of-line register save/restore routines need LR. */
26763 if (info->lr_save_p
26764 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26765 && (info->savres_strategy & SAVE_INLINE_GPRS)
26766 && (info->savres_strategy & REST_INLINE_GPRS)
26767 && (info->savres_strategy & SAVE_INLINE_FPRS)
26768 && (info->savres_strategy & REST_INLINE_FPRS)
26769 && (info->savres_strategy & SAVE_INLINE_VRS)
26770 && (info->savres_strategy & REST_INLINE_VRS))
26772 int offset = info->lr_save_offset;
26773 if (info->push_p)
26774 offset += info->total_size;
26775 if (IN_RANGE (offset, -0x8000, 0x7fff))
26776 bitmap_set_bit (components, 0);
26779 /* Optimize saving the TOC. This is component 2. */
26780 if (cfun->machine->save_toc_in_prologue)
26781 bitmap_set_bit (components, 2);
26783 return components;
26786 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26787 static sbitmap
26788 rs6000_components_for_bb (basic_block bb)
26790 rs6000_stack_t *info = rs6000_stack_info ();
26792 bitmap in = DF_LIVE_IN (bb);
26793 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26794 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26796 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26797 bitmap_clear (components);
26799 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26801 /* GPRs. */
26802 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26803 if (bitmap_bit_p (in, regno)
26804 || bitmap_bit_p (gen, regno)
26805 || bitmap_bit_p (kill, regno))
26806 bitmap_set_bit (components, regno);
26808 /* FPRs. */
26809 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26810 if (bitmap_bit_p (in, regno)
26811 || bitmap_bit_p (gen, regno)
26812 || bitmap_bit_p (kill, regno))
26813 bitmap_set_bit (components, regno);
26815 /* The link register. */
26816 if (bitmap_bit_p (in, LR_REGNO)
26817 || bitmap_bit_p (gen, LR_REGNO)
26818 || bitmap_bit_p (kill, LR_REGNO))
26819 bitmap_set_bit (components, 0);
26821 /* The TOC save. */
26822 if (bitmap_bit_p (in, TOC_REGNUM)
26823 || bitmap_bit_p (gen, TOC_REGNUM)
26824 || bitmap_bit_p (kill, TOC_REGNUM))
26825 bitmap_set_bit (components, 2);
26827 return components;
26830 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26831 static void
26832 rs6000_disqualify_components (sbitmap components, edge e,
26833 sbitmap edge_components, bool /*is_prologue*/)
26835 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26836 live where we want to place that code. */
26837 if (bitmap_bit_p (edge_components, 0)
26838 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26840 if (dump_file)
26841 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26842 "on entry to bb %d\n", e->dest->index);
26843 bitmap_clear_bit (components, 0);
26847 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26848 static void
26849 rs6000_emit_prologue_components (sbitmap components)
26851 rs6000_stack_t *info = rs6000_stack_info ();
26852 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26853 ? HARD_FRAME_POINTER_REGNUM
26854 : STACK_POINTER_REGNUM);
26856 machine_mode reg_mode = Pmode;
26857 int reg_size = TARGET_32BIT ? 4 : 8;
26858 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26859 ? DFmode : SFmode;
26860 int fp_reg_size = 8;
26862 /* Prologue for LR. */
26863 if (bitmap_bit_p (components, 0))
26865 rtx reg = gen_rtx_REG (reg_mode, 0);
26866 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26867 RTX_FRAME_RELATED_P (insn) = 1;
26868 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26870 int offset = info->lr_save_offset;
26871 if (info->push_p)
26872 offset += info->total_size;
26874 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26875 RTX_FRAME_RELATED_P (insn) = 1;
26876 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26877 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26878 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26881 /* Prologue for TOC. */
26882 if (bitmap_bit_p (components, 2))
26884 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26885 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26886 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26889 /* Prologue for the GPRs. */
26890 int offset = info->gp_save_offset;
26891 if (info->push_p)
26892 offset += info->total_size;
26894 for (int i = info->first_gp_reg_save; i < 32; i++)
26896 if (bitmap_bit_p (components, i))
26898 rtx reg = gen_rtx_REG (reg_mode, i);
26899 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26900 RTX_FRAME_RELATED_P (insn) = 1;
26901 rtx set = copy_rtx (single_set (insn));
26902 add_reg_note (insn, REG_CFA_OFFSET, set);
26905 offset += reg_size;
26908 /* Prologue for the FPRs. */
26909 offset = info->fp_save_offset;
26910 if (info->push_p)
26911 offset += info->total_size;
26913 for (int i = info->first_fp_reg_save; i < 64; i++)
26915 if (bitmap_bit_p (components, i))
26917 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26918 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26919 RTX_FRAME_RELATED_P (insn) = 1;
26920 rtx set = copy_rtx (single_set (insn));
26921 add_reg_note (insn, REG_CFA_OFFSET, set);
26924 offset += fp_reg_size;
26928 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26929 static void
26930 rs6000_emit_epilogue_components (sbitmap components)
26932 rs6000_stack_t *info = rs6000_stack_info ();
26933 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26934 ? HARD_FRAME_POINTER_REGNUM
26935 : STACK_POINTER_REGNUM);
26937 machine_mode reg_mode = Pmode;
26938 int reg_size = TARGET_32BIT ? 4 : 8;
26940 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26941 ? DFmode : SFmode;
26942 int fp_reg_size = 8;
26944 /* Epilogue for the FPRs. */
26945 int offset = info->fp_save_offset;
26946 if (info->push_p)
26947 offset += info->total_size;
26949 for (int i = info->first_fp_reg_save; i < 64; i++)
26951 if (bitmap_bit_p (components, i))
26953 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26954 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26955 RTX_FRAME_RELATED_P (insn) = 1;
26956 add_reg_note (insn, REG_CFA_RESTORE, reg);
26959 offset += fp_reg_size;
26962 /* Epilogue for the GPRs. */
26963 offset = info->gp_save_offset;
26964 if (info->push_p)
26965 offset += info->total_size;
26967 for (int i = info->first_gp_reg_save; i < 32; i++)
26969 if (bitmap_bit_p (components, i))
26971 rtx reg = gen_rtx_REG (reg_mode, i);
26972 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26973 RTX_FRAME_RELATED_P (insn) = 1;
26974 add_reg_note (insn, REG_CFA_RESTORE, reg);
26977 offset += reg_size;
26980 /* Epilogue for LR. */
26981 if (bitmap_bit_p (components, 0))
26983 int offset = info->lr_save_offset;
26984 if (info->push_p)
26985 offset += info->total_size;
26987 rtx reg = gen_rtx_REG (reg_mode, 0);
26988 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26990 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26991 insn = emit_move_insn (lr, reg);
26992 RTX_FRAME_RELATED_P (insn) = 1;
26993 add_reg_note (insn, REG_CFA_RESTORE, lr);
26997 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26998 static void
26999 rs6000_set_handled_components (sbitmap components)
27001 rs6000_stack_t *info = rs6000_stack_info ();
27003 for (int i = info->first_gp_reg_save; i < 32; i++)
27004 if (bitmap_bit_p (components, i))
27005 cfun->machine->gpr_is_wrapped_separately[i] = true;
27007 for (int i = info->first_fp_reg_save; i < 64; i++)
27008 if (bitmap_bit_p (components, i))
27009 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
27011 if (bitmap_bit_p (components, 0))
27012 cfun->machine->lr_is_wrapped_separately = true;
27014 if (bitmap_bit_p (components, 2))
27015 cfun->machine->toc_is_wrapped_separately = true;
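/* A note on the component numbering the three shrink-wrap hooks above
   share: bit 0 stands for LR, bit 2 for the TOC register, bits
   first_gp_reg_save..31 for GPRs, and bits 32..63 for FPRs.  The
   compiled-out sketch below merely restates that mapping; the helper
   name is illustrative and not part of this file.  */
#if 0
static const char *
component_kind (unsigned int c)
{
  if (c == 0)
    return "LR";
  if (c == 2)
    return "TOC";
  if (c < 32)
    return "GPR";		/* components first_gp_reg_save..31 */
  return "FPR";			/* components 32..63 */
}
#endif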
27018 /* VRSAVE is a bit vector representing which AltiVec registers
27019 are used. The OS uses this to determine which vector
27020 registers to save on a context switch. We need to save
27021 VRSAVE on the stack frame, add whatever AltiVec registers we
27022 used in this function, and do the corresponding magic in the
27023 epilogue. */
27024 static void
27025 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
27026 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
27028 /* Get VRSAVE into a GPR. */
27029 rtx reg = gen_rtx_REG (SImode, save_regno);
27030 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27031 if (TARGET_MACHO)
27032 emit_insn (gen_get_vrsave_internal (reg));
27033 else
27034 emit_insn (gen_rtx_SET (reg, vrsave));
27036 /* Save VRSAVE. */
27037 int offset = info->vrsave_save_offset + frame_off;
27038 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
27040 /* Include the registers in the mask. */
27041 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
27043 emit_insn (generate_set_vrsave (reg, info, 0));
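/* Compiled-out sketch of the VRSAVE update performed above: read the
   old mask, OR in the bits for the AltiVec registers this function
   uses (info->vrsave_mask), and write the result back.  The helper
   name is hypothetical, for illustration only.  */
#if 0
static unsigned int
updated_vrsave (unsigned int old_vrsave, unsigned int vrsave_mask)
{
  return old_vrsave | vrsave_mask;	/* matches the gen_iorsi3 above */
}
#endif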
27046 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
27047 called, it left the arg pointer to the old stack in r29. Otherwise, the
27048 arg pointer is the top of the current frame. */
27049 static void
27050 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
27051 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
27053 cfun->machine->split_stack_argp_used = true;
27055 if (sp_adjust)
27057 rtx r12 = gen_rtx_REG (Pmode, 12);
27058 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27059 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
27060 emit_insn_before (set_r12, sp_adjust);
27062 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
27064 rtx r12 = gen_rtx_REG (Pmode, 12);
27065 if (frame_off == 0)
27066 emit_move_insn (r12, frame_reg_rtx);
27067 else
27068 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27071 if (info->push_p)
27073 rtx r12 = gen_rtx_REG (Pmode, 12);
27074 rtx r29 = gen_rtx_REG (Pmode, 29);
27075 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27076 rtx not_more = gen_label_rtx ();
27077 rtx jump;
27079 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27080 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27081 gen_rtx_LABEL_REF (VOIDmode, not_more),
27082 pc_rtx);
27083 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27084 JUMP_LABEL (jump) = not_more;
27085 LABEL_NUSES (not_more) += 1;
27086 emit_move_insn (r12, r29);
27087 emit_label (not_more);
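/* Compiled-out sketch of the arg pointer selection made above.
   Whether __morestack was called is signalled through cr7; rendering
   that flag as a plain boolean here is an assumption made for the
   sake of illustration.  */
#if 0
static unsigned long
split_stack_argp (int morestack_was_called, unsigned long frame_top,
		  unsigned long r29_old_argp)
{
  unsigned long r12 = frame_top;	/* default: top of current frame */
  if (morestack_was_called)
    r12 = r29_old_argp;			/* old stack's args, left in r29 */
  return r12;
}
#endif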
27091 /* Emit function prologue as insns. */
27093 void
27094 rs6000_emit_prologue (void)
27096 rs6000_stack_t *info = rs6000_stack_info ();
27097 machine_mode reg_mode = Pmode;
27098 int reg_size = TARGET_32BIT ? 4 : 8;
27099 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27100 ? DFmode : SFmode;
27101 int fp_reg_size = 8;
27102 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27103 rtx frame_reg_rtx = sp_reg_rtx;
27104 unsigned int cr_save_regno;
27105 rtx cr_save_rtx = NULL_RTX;
27106 rtx_insn *insn;
27107 int strategy;
27108 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27109 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27110 && call_used_regs[STATIC_CHAIN_REGNUM]);
27111 int using_split_stack = (flag_split_stack
27112 && (lookup_attribute ("no_split_stack",
27113 DECL_ATTRIBUTES (cfun->decl))
27114 == NULL));
27116 /* Offset to top of frame for frame_reg and sp respectively. */
27117 HOST_WIDE_INT frame_off = 0;
27118 HOST_WIDE_INT sp_off = 0;
27119 /* sp_adjust is the stack adjusting instruction, tracked so that the
27120 insn setting up the split-stack arg pointer can be emitted just
27121 prior to it, when r12 is not used here for other purposes. */
27122 rtx_insn *sp_adjust = 0;
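/* Stated as an invariant: at any point below, frame_reg_rtx + frame_off
   and the stack pointer + sp_off both address the top of the frame.  */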
27124 #if CHECKING_P
27125 /* Track and check usage of r0, r11, r12. */
27126 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27127 #define START_USE(R) do \
27129 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27130 reg_inuse |= 1 << (R); \
27131 } while (0)
27132 #define END_USE(R) do \
27134 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27135 reg_inuse &= ~(1 << (R)); \
27136 } while (0)
27137 #define NOT_INUSE(R) do \
27139 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27140 } while (0)
27141 #else
27142 #define START_USE(R) do {} while (0)
27143 #define END_USE(R) do {} while (0)
27144 #define NOT_INUSE(R) do {} while (0)
27145 #endif
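/* Compiled-out illustration of the checking macros above: reg_inuse
   tracks r0, r11 and r12 as a bitmask, and each START_USE/END_USE pair
   must bracket the region where the register carries a live value.  */
#if 0
static void
reg_tracking_example (void)
{
  START_USE (0);	/* assert r0 is free, then mark it live */
  /* ... use r0 as a scratch register ... */
  NOT_INUSE (11);	/* assert r11 is currently free */
  END_USE (0);		/* assert r0 is live, then mark it free */
}
#endif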
27147 if (DEFAULT_ABI == ABI_ELFv2
27148 && !TARGET_SINGLE_PIC_BASE)
27150 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27152 /* With -mminimal-toc we may generate an extra use of r2 below. */
27153 if (TARGET_TOC && TARGET_MINIMAL_TOC
27154 && !constant_pool_empty_p ())
27155 cfun->machine->r2_setup_needed = true;
27159 if (flag_stack_usage_info)
27160 current_function_static_stack_size = info->total_size;
27162 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27164 HOST_WIDE_INT size = info->total_size;
27166 if (crtl->is_leaf && !cfun->calls_alloca)
27168 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27169 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27170 size - get_stack_check_protect ());
27172 else if (size > 0)
27173 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27176 if (TARGET_FIX_AND_CONTINUE)
27178 /* gdb on darwin arranges to forward a function from the old
27179 address by modifying the first 5 instructions of the function
27180 to branch to the overriding function. This is necessary to
27181 permit function pointers that point to the old function to
27182 actually forward to the new function. */
27183 emit_insn (gen_nop ());
27184 emit_insn (gen_nop ());
27185 emit_insn (gen_nop ());
27186 emit_insn (gen_nop ());
27187 emit_insn (gen_nop ());
27190 /* Handle world saves specially here. */
27191 if (WORLD_SAVE_P (info))
27193 int i, j, sz;
27194 rtx treg;
27195 rtvec p;
27196 rtx reg0;
27198 /* save_world expects lr in r0. */
27199 reg0 = gen_rtx_REG (Pmode, 0);
27200 if (info->lr_save_p)
27202 insn = emit_move_insn (reg0,
27203 gen_rtx_REG (Pmode, LR_REGNO));
27204 RTX_FRAME_RELATED_P (insn) = 1;
27207 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27208 assumptions about the offsets of various bits of the stack
27209 frame. */
27210 gcc_assert (info->gp_save_offset == -220
27211 && info->fp_save_offset == -144
27212 && info->lr_save_offset == 8
27213 && info->cr_save_offset == 4
27214 && info->push_p
27215 && info->lr_save_p
27216 && (!crtl->calls_eh_return
27217 || info->ehrd_offset == -432)
27218 && info->vrsave_save_offset == -224
27219 && info->altivec_save_offset == -416);
27221 treg = gen_rtx_REG (SImode, 11);
27222 emit_move_insn (treg, GEN_INT (-info->total_size));
27224 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27225 in R11. It also clobbers R12, so beware! */
27228 /* Preserve CR2 for save_world prologues. */
27228 sz = 5;
27229 sz += 32 - info->first_gp_reg_save;
27230 sz += 64 - info->first_fp_reg_save;
27231 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27232 p = rtvec_alloc (sz);
27233 j = 0;
27234 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
27235 gen_rtx_REG (SImode,
27236 LR_REGNO));
27237 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27238 gen_rtx_SYMBOL_REF (Pmode,
27239 "*save_world"));
27240 /* We do floats first so that the instruction pattern matches
27241 properly. */
27242 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27243 RTVEC_ELT (p, j++)
27244 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27245 ? DFmode : SFmode,
27246 info->first_fp_reg_save + i),
27247 frame_reg_rtx,
27248 info->fp_save_offset + frame_off + 8 * i);
27249 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27250 RTVEC_ELT (p, j++)
27251 = gen_frame_store (gen_rtx_REG (V4SImode,
27252 info->first_altivec_reg_save + i),
27253 frame_reg_rtx,
27254 info->altivec_save_offset + frame_off + 16 * i);
27255 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27256 RTVEC_ELT (p, j++)
27257 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27258 frame_reg_rtx,
27259 info->gp_save_offset + frame_off + reg_size * i);
27261 /* CR register traditionally saved as CR2. */
27262 RTVEC_ELT (p, j++)
27263 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27264 frame_reg_rtx, info->cr_save_offset + frame_off);
27265 /* Explain the use of R0. */
27266 if (info->lr_save_p)
27267 RTVEC_ELT (p, j++)
27268 = gen_frame_store (reg0,
27269 frame_reg_rtx, info->lr_save_offset + frame_off);
27270 /* Explain what happens to the stack pointer. */
27272 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27273 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27276 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27277 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27278 treg, GEN_INT (-info->total_size));
27279 sp_off = frame_off = info->total_size;
27282 strategy = info->savres_strategy;
27284 /* For V.4, update stack before we do any saving and set back pointer. */
27285 if (! WORLD_SAVE_P (info)
27286 && info->push_p
27287 && (DEFAULT_ABI == ABI_V4
27288 || crtl->calls_eh_return))
27290 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27291 || !(strategy & SAVE_INLINE_GPRS)
27292 || !(strategy & SAVE_INLINE_VRS));
27293 int ptr_regno = -1;
27294 rtx ptr_reg = NULL_RTX;
27295 int ptr_off = 0;
27297 if (info->total_size < 32767)
27298 frame_off = info->total_size;
27299 else if (need_r11)
27300 ptr_regno = 11;
27301 else if (info->cr_save_p
27302 || info->lr_save_p
27303 || info->first_fp_reg_save < 64
27304 || info->first_gp_reg_save < 32
27305 || info->altivec_size != 0
27306 || info->vrsave_size != 0
27307 || crtl->calls_eh_return)
27308 ptr_regno = 12;
27309 else
27311 /* The prologue won't be saving any regs so there is no need
27312 to set up a frame register to access any frame save area.
27313 We also won't be using frame_off anywhere below, but set
27314 the correct value anyway to protect against future
27315 changes to this function. */
27316 frame_off = info->total_size;
27318 if (ptr_regno != -1)
27320 /* Set up the frame offset to that needed by the first
27321 out-of-line save function. */
27322 START_USE (ptr_regno);
27323 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27324 frame_reg_rtx = ptr_reg;
27325 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27326 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27327 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27328 ptr_off = info->gp_save_offset + info->gp_size;
27329 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27330 ptr_off = info->altivec_save_offset + info->altivec_size;
27331 frame_off = -ptr_off;
27333 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27334 ptr_reg, ptr_off);
27335 if (REGNO (frame_reg_rtx) == 12)
27336 sp_adjust = 0;
27337 sp_off = info->total_size;
27338 if (frame_reg_rtx != sp_reg_rtx)
27339 rs6000_emit_stack_tie (frame_reg_rtx, false);
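/* The stack tie above acts as a scheduling barrier: it keeps memory
   accesses through frame_reg_rtx from being reordered against the
   stack pointer update that just established the frame.  */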
27342 /* If we use the link register, get it into r0. */
27343 if (!WORLD_SAVE_P (info) && info->lr_save_p
27344 && !cfun->machine->lr_is_wrapped_separately)
27346 rtx addr, reg, mem;
27348 reg = gen_rtx_REG (Pmode, 0);
27349 START_USE (0);
27350 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27351 RTX_FRAME_RELATED_P (insn) = 1;
27353 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27354 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27356 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27357 GEN_INT (info->lr_save_offset + frame_off));
27358 mem = gen_rtx_MEM (Pmode, addr);
27359 /* This should not be in rs6000_sr_alias_set, because of
27360 __builtin_return_address. */
27362 insn = emit_move_insn (mem, reg);
27363 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27364 NULL_RTX, NULL_RTX);
27365 END_USE (0);
27369 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27370 r12 will be needed by out-of-line gpr restore. */
27371 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27372 && !(strategy & (SAVE_INLINE_GPRS
27373 | SAVE_NOINLINE_GPRS_SAVES_LR))
27374 ? 11 : 12);
27375 if (!WORLD_SAVE_P (info)
27376 && info->cr_save_p
27377 && REGNO (frame_reg_rtx) != cr_save_regno
27378 && !(using_static_chain_p && cr_save_regno == 11)
27379 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27381 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27382 START_USE (cr_save_regno);
27383 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27386 /* Do any required saving of FPRs. If there are only one or two to save,
27387 do it ourselves. Otherwise, call an out-of-line function. */
27388 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27390 int offset = info->fp_save_offset + frame_off;
27391 for (int i = info->first_fp_reg_save; i < 64; i++)
27393 if (save_reg_p (i)
27394 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27395 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27396 sp_off - frame_off);
27398 offset += fp_reg_size;
27401 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27403 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27404 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27405 unsigned ptr_regno = ptr_regno_for_savres (sel);
27406 rtx ptr_reg = frame_reg_rtx;
27408 if (REGNO (frame_reg_rtx) == ptr_regno)
27409 gcc_checking_assert (frame_off == 0);
27410 else
27412 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27413 NOT_INUSE (ptr_regno);
27414 emit_insn (gen_add3_insn (ptr_reg,
27415 frame_reg_rtx, GEN_INT (frame_off)));
27417 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27418 info->fp_save_offset,
27419 info->lr_save_offset,
27420 DFmode, sel);
27421 rs6000_frame_related (insn, ptr_reg, sp_off,
27422 NULL_RTX, NULL_RTX);
27423 if (lr)
27424 END_USE (0);
27427 /* Save GPRs. This is done as a PARALLEL if we are using
27428 the store-multiple instructions. */
27429 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27431 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27432 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27433 unsigned ptr_regno = ptr_regno_for_savres (sel);
27434 rtx ptr_reg = frame_reg_rtx;
27435 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27436 int end_save = info->gp_save_offset + info->gp_size;
27437 int ptr_off;
27439 if (ptr_regno == 12)
27440 sp_adjust = 0;
27441 if (!ptr_set_up)
27442 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27444 /* Need to adjust r11 (r12) if we saved any FPRs. */
27445 if (end_save + frame_off != 0)
27447 rtx offset = GEN_INT (end_save + frame_off);
27449 if (ptr_set_up)
27450 frame_off = -end_save;
27451 else
27452 NOT_INUSE (ptr_regno);
27453 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27455 else if (!ptr_set_up)
27457 NOT_INUSE (ptr_regno);
27458 emit_move_insn (ptr_reg, frame_reg_rtx);
27460 ptr_off = -end_save;
27461 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27462 info->gp_save_offset + ptr_off,
27463 info->lr_save_offset + ptr_off,
27464 reg_mode, sel);
27465 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27466 NULL_RTX, NULL_RTX);
27467 if (lr)
27468 END_USE (0);
27470 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27472 rtvec p;
27473 int i;
27474 p = rtvec_alloc (32 - info->first_gp_reg_save);
27475 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27476 RTVEC_ELT (p, i)
27477 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27478 frame_reg_rtx,
27479 info->gp_save_offset + frame_off + reg_size * i);
27480 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27481 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27482 NULL_RTX, NULL_RTX);
27484 else if (!WORLD_SAVE_P (info))
27486 int offset = info->gp_save_offset + frame_off;
27487 for (int i = info->first_gp_reg_save; i < 32; i++)
27489 if (save_reg_p (i)
27490 && !cfun->machine->gpr_is_wrapped_separately[i])
27491 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27492 sp_off - frame_off);
27494 offset += reg_size;
27498 if (crtl->calls_eh_return)
27500 unsigned int i;
27501 rtvec p;
27503 for (i = 0; ; ++i)
27505 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27506 if (regno == INVALID_REGNUM)
27507 break;
27510 p = rtvec_alloc (i);
27512 for (i = 0; ; ++i)
27514 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27515 if (regno == INVALID_REGNUM)
27516 break;
27518 rtx set
27519 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27520 sp_reg_rtx,
27521 info->ehrd_offset + sp_off + reg_size * (int) i);
27522 RTVEC_ELT (p, i) = set;
27523 RTX_FRAME_RELATED_P (set) = 1;
27526 insn = emit_insn (gen_blockage ());
27527 RTX_FRAME_RELATED_P (insn) = 1;
27528 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27531 /* In AIX ABI we need to make sure r2 is really saved. */
27532 if (TARGET_AIX && crtl->calls_eh_return)
27534 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27535 rtx join_insn, note;
27536 rtx_insn *save_insn;
27537 long toc_restore_insn;
27539 tmp_reg = gen_rtx_REG (Pmode, 11);
27540 tmp_reg_si = gen_rtx_REG (SImode, 11);
27541 if (using_static_chain_p)
27543 START_USE (0);
27544 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27546 else
27547 START_USE (11);
27548 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27549 /* Peek at the instruction to which this function returns. If it's
27550 restoring r2, then we know we've already saved r2. We can't
27551 unconditionally save r2 because the value we have will already
27552 be updated if we arrived at this function via a plt call or
27553 toc adjusting stub. */
27554 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27555 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27556 + RS6000_TOC_SAVE_SLOT);
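/* 0x80410000 is the image of "lwz 2,0(1)" and 0xE8410000 that of
   "ld 2,0(1)"; adding RS6000_TOC_SAVE_SLOT fills in the displacement,
   giving the exact TOC-restoring load that a call stub leaves after
   the call.  */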
27557 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27558 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27559 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27560 validate_condition_mode (EQ, CCUNSmode);
27561 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27562 emit_insn (gen_rtx_SET (compare_result,
27563 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27564 toc_save_done = gen_label_rtx ();
27565 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27566 gen_rtx_EQ (VOIDmode, compare_result,
27567 const0_rtx),
27568 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27569 pc_rtx);
27570 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27571 JUMP_LABEL (jump) = toc_save_done;
27572 LABEL_NUSES (toc_save_done) += 1;
27574 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27575 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27576 sp_off - frame_off);
27578 emit_label (toc_save_done);
27580 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27581 have a CFG that has different saves along different paths.
27582 Move the note to a dummy blockage insn, which describes that
27583 R2 is unconditionally saved after the label. */
27584 /* ??? An alternate representation might be a special insn pattern
27585 containing both the branch and the store. That might give the
27586 code that minimizes the number of DW_CFA_advance opcodes more
27587 freedom in placing the annotations. */
27588 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27589 if (note)
27590 remove_note (save_insn, note);
27591 else
27592 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27593 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27594 RTX_FRAME_RELATED_P (save_insn) = 0;
27596 join_insn = emit_insn (gen_blockage ());
27597 REG_NOTES (join_insn) = note;
27598 RTX_FRAME_RELATED_P (join_insn) = 1;
27600 if (using_static_chain_p)
27602 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27603 END_USE (0);
27605 else
27606 END_USE (11);
27609 /* Save CR if we use any that must be preserved. */
27610 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27612 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27613 GEN_INT (info->cr_save_offset + frame_off));
27614 rtx mem = gen_frame_mem (SImode, addr);
27616 /* If we didn't copy cr before, do so now using r0. */
27617 if (cr_save_rtx == NULL_RTX)
27619 START_USE (0);
27620 cr_save_rtx = gen_rtx_REG (SImode, 0);
27621 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27624 /* Saving CR requires a two-instruction sequence: one instruction
27625 to move the CR to a general-purpose register, and a second
27626 instruction that stores the GPR to memory.
27628 We do not emit any DWARF CFI records for the first of these,
27629 because we cannot properly represent the fact that CR is saved in
27630 a register. One reason is that we cannot express that multiple
27631 CR fields are saved; another reason is that on 64-bit, the size
27632 of the CR register in DWARF (4 bytes) differs from the size of
27633 a general-purpose register.
27635 This means if any intervening instruction were to clobber one of
27636 the call-saved CR fields, we'd have incorrect CFI. To prevent
27637 this from happening, we mark the store to memory as a use of
27638 those CR fields, which prevents any such instruction from being
27639 scheduled in between the two instructions. */
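/* Concretely, the guarded sequence is (32-bit mnemonics, sketch):
     mfcr  rN		# move CR into a GPR
     stw   rN,off(1)	# store the GPR into the CR save slot
   with the USEs keeping other instructions from landing in between.  */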
27640 rtx crsave_v[9];
27641 int n_crsave = 0;
27642 int i;
27644 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27645 for (i = 0; i < 8; i++)
27646 if (save_reg_p (CR0_REGNO + i))
27647 crsave_v[n_crsave++]
27648 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27650 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27651 gen_rtvec_v (n_crsave, crsave_v)));
27652 END_USE (REGNO (cr_save_rtx));
27654 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27655 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27656 so we need to construct a frame expression manually. */
27657 RTX_FRAME_RELATED_P (insn) = 1;
27659 /* Update address to be stack-pointer relative, like
27660 rs6000_frame_related would do. */
27661 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27662 GEN_INT (info->cr_save_offset + sp_off));
27663 mem = gen_frame_mem (SImode, addr);
27665 if (DEFAULT_ABI == ABI_ELFv2)
27667 /* In the ELFv2 ABI we generate separate CFI records for each
27668 CR field that was actually saved. They all point to the
27669 same 32-bit stack slot. */
27670 rtx crframe[8];
27671 int n_crframe = 0;
27673 for (i = 0; i < 8; i++)
27674 if (save_reg_p (CR0_REGNO + i))
27676 crframe[n_crframe]
27677 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27679 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27680 n_crframe++;
27683 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27684 gen_rtx_PARALLEL (VOIDmode,
27685 gen_rtvec_v (n_crframe, crframe)));
27687 else
27689 /* In other ABIs, by convention, we use a single CR regnum to
27690 represent the fact that all call-saved CR fields are saved.
27691 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27692 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27693 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27697 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27698 *separate* slots if the routine calls __builtin_eh_return, so
27699 that they can be independently restored by the unwinder. */
27700 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27702 int i, cr_off = info->ehcr_offset;
27703 rtx crsave;
27705 /* ??? We might get better performance by using multiple mfocrf
27706 instructions. */
27707 crsave = gen_rtx_REG (SImode, 0);
27708 emit_insn (gen_prologue_movesi_from_cr (crsave));
27710 for (i = 0; i < 8; i++)
27711 if (!call_used_regs[CR0_REGNO + i])
27713 rtvec p = rtvec_alloc (2);
27714 RTVEC_ELT (p, 0)
27715 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27716 RTVEC_ELT (p, 1)
27717 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27719 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27721 RTX_FRAME_RELATED_P (insn) = 1;
27722 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27723 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27724 sp_reg_rtx, cr_off + sp_off));
27726 cr_off += reg_size;
27730 /* If we are emitting stack probes, but allocate no stack, then
27731 just note that in the dump file. */
27732 if (flag_stack_clash_protection
27733 && dump_file
27734 && !info->push_p)
27735 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27737 /* Update stack and set back pointer unless this is V.4,
27738 for which it was done previously. */
27739 if (!WORLD_SAVE_P (info) && info->push_p
27740 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27742 rtx ptr_reg = NULL;
27743 int ptr_off = 0;
27745 /* If saving altivec regs we need to be able to address all save
27746 locations using a 16-bit offset. */
27747 if ((strategy & SAVE_INLINE_VRS) == 0
27748 || (info->altivec_size != 0
27749 && (info->altivec_save_offset + info->altivec_size - 16
27750 + info->total_size - frame_off) > 32767)
27751 || (info->vrsave_size != 0
27752 && (info->vrsave_save_offset
27753 + info->total_size - frame_off) > 32767))
27755 int sel = SAVRES_SAVE | SAVRES_VR;
27756 unsigned ptr_regno = ptr_regno_for_savres (sel);
27758 if (using_static_chain_p
27759 && ptr_regno == STATIC_CHAIN_REGNUM)
27760 ptr_regno = 12;
27761 if (REGNO (frame_reg_rtx) != ptr_regno)
27762 START_USE (ptr_regno);
27763 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27764 frame_reg_rtx = ptr_reg;
27765 ptr_off = info->altivec_save_offset + info->altivec_size;
27766 frame_off = -ptr_off;
27768 else if (REGNO (frame_reg_rtx) == 1)
27769 frame_off = info->total_size;
27770 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27771 ptr_reg, ptr_off);
27772 if (REGNO (frame_reg_rtx) == 12)
27773 sp_adjust = 0;
27774 sp_off = info->total_size;
27775 if (frame_reg_rtx != sp_reg_rtx)
27776 rs6000_emit_stack_tie (frame_reg_rtx, false);
27779 /* Set frame pointer, if needed. */
27780 if (frame_pointer_needed)
27782 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27783 sp_reg_rtx);
27784 RTX_FRAME_RELATED_P (insn) = 1;
27787 /* Save AltiVec registers if needed. Save here because the red zone does
27788 not always include AltiVec registers. */
27789 if (!WORLD_SAVE_P (info)
27790 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27792 int end_save = info->altivec_save_offset + info->altivec_size;
27793 int ptr_off;
27794 /* Oddly, the vector save/restore functions point r0 at the end
27795 of the save area, then use r11 or r12 to load offsets for
27796 [reg+reg] addressing. */
27797 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27798 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27799 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27801 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27802 NOT_INUSE (0);
27803 if (scratch_regno == 12)
27804 sp_adjust = 0;
27805 if (end_save + frame_off != 0)
27807 rtx offset = GEN_INT (end_save + frame_off);
27809 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27811 else
27812 emit_move_insn (ptr_reg, frame_reg_rtx);
27814 ptr_off = -end_save;
27815 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27816 info->altivec_save_offset + ptr_off,
27817 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27818 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27819 NULL_RTX, NULL_RTX);
27820 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27822 /* The oddity mentioned above clobbered our frame reg. */
27823 emit_move_insn (frame_reg_rtx, ptr_reg);
27824 frame_off = ptr_off;
27827 else if (!WORLD_SAVE_P (info)
27828 && info->altivec_size != 0)
27830 int i;
27832 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27833 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27835 rtx areg, savereg, mem;
27836 HOST_WIDE_INT offset;
27838 offset = (info->altivec_save_offset + frame_off
27839 + 16 * (i - info->first_altivec_reg_save));
27841 savereg = gen_rtx_REG (V4SImode, i);
27843 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27845 mem = gen_frame_mem (V4SImode,
27846 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27847 GEN_INT (offset)));
27848 insn = emit_insn (gen_rtx_SET (mem, savereg));
27849 areg = NULL_RTX;
27851 else
27853 NOT_INUSE (0);
27854 areg = gen_rtx_REG (Pmode, 0);
27855 emit_move_insn (areg, GEN_INT (offset));
27857 /* AltiVec addressing mode is [reg+reg]. */
27858 mem = gen_frame_mem (V4SImode,
27859 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27861 /* Rather than emitting a generic move, force use of the stvx
27862 instruction, which we always want on ISA 2.07 (power8) systems.
27863 In particular we don't want xxpermdi/stxvd2x for little
27864 endian. */
27865 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27868 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27869 areg, GEN_INT (offset));
27873 /* Save VRSAVE (see the comment before emit_vrsave_prologue above). */
27880 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27882 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27883 be using r12 as frame_reg_rtx and r11 as the static chain
27884 pointer for nested functions. */
27885 int save_regno = 12;
27886 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27887 && !using_static_chain_p)
27888 save_regno = 11;
27889 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27891 save_regno = 11;
27892 if (using_static_chain_p)
27893 save_regno = 0;
27895 NOT_INUSE (save_regno);
27897 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27900 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27901 if (!TARGET_SINGLE_PIC_BASE
27902 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27903 && !constant_pool_empty_p ())
27904 || (DEFAULT_ABI == ABI_V4
27905 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27906 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27908 /* If emit_load_toc_table will use the link register, we need to save
27909 it. We use R12 for this purpose because emit_load_toc_table
27910 can use register 0. This allows us to use a plain 'blr' to return
27911 from the procedure more often. */
27912 int save_LR_around_toc_setup = (TARGET_ELF
27913 && DEFAULT_ABI == ABI_V4
27914 && flag_pic
27915 && ! info->lr_save_p
27916 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27917 if (save_LR_around_toc_setup)
27919 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27920 rtx tmp = gen_rtx_REG (Pmode, 12);
27922 sp_adjust = 0;
27923 insn = emit_move_insn (tmp, lr);
27924 RTX_FRAME_RELATED_P (insn) = 1;
27926 rs6000_emit_load_toc_table (TRUE);
27928 insn = emit_move_insn (lr, tmp);
27929 add_reg_note (insn, REG_CFA_RESTORE, lr);
27930 RTX_FRAME_RELATED_P (insn) = 1;
27932 else
27933 rs6000_emit_load_toc_table (TRUE);
27936 #if TARGET_MACHO
27937 if (!TARGET_SINGLE_PIC_BASE
27938 && DEFAULT_ABI == ABI_DARWIN
27939 && flag_pic && crtl->uses_pic_offset_table)
27941 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27942 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27944 /* Save and restore LR locally around this call (in R0). */
27945 if (!info->lr_save_p)
27946 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27948 emit_insn (gen_load_macho_picbase (src));
27950 emit_move_insn (gen_rtx_REG (Pmode,
27951 RS6000_PIC_OFFSET_TABLE_REGNUM),
27952 lr);
27954 if (!info->lr_save_p)
27955 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27957 #endif
27959 /* If we need to, save the TOC register after doing the stack setup.
27960 Do not emit eh frame info for this save. The unwinder wants info,
27961 conceptually attached to instructions in this function, about
27962 register values in the caller of this function. This R2 may have
27963 already been changed from the value in the caller.
27964 We don't attempt to write accurate DWARF EH frame info for R2
27965 because code emitted by gcc for a (non-pointer) function call
27966 doesn't save and restore R2. Instead, R2 is managed out-of-line
27967 by a linker generated plt call stub when the function resides in
27968 a shared library. This behavior is costly to describe in DWARF,
27969 both in terms of the size of DWARF info and the time taken in the
27970 unwinder to interpret it. R2 changes, apart from the
27971 calls_eh_return case earlier in this function, are handled by
27972 linux-unwind.h frob_update_context. */
27973 if (rs6000_save_toc_in_prologue_p ()
27974 && !cfun->machine->toc_is_wrapped_separately)
27976 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27977 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
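/* On 64-bit targets this emits the familiar "std 2,RS6000_TOC_SAVE_SLOT(1)"
   TOC save; no unwind info is attached, for the reasons given above.  */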
27980 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27981 if (using_split_stack && split_stack_arg_pointer_used_p ())
27982 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27985 /* Output .extern statements for the save/restore routines we use. */
27987 static void
27988 rs6000_output_savres_externs (FILE *file)
27990 rs6000_stack_t *info = rs6000_stack_info ();
27992 if (TARGET_DEBUG_STACK)
27993 debug_stack_info (info);
27995 /* Write .extern for any function we will call to save and restore
27996 fp values. */
27997 if (info->first_fp_reg_save < 64
27998 && !TARGET_MACHO
27999 && !TARGET_ELF)
28001 char *name;
28002 int regno = info->first_fp_reg_save - 32;
28004 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
28006 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
28007 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
28008 name = rs6000_savres_routine_name (regno, sel);
28009 fprintf (file, "\t.extern %s\n", name);
28011 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
28013 bool lr = (info->savres_strategy
28014 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28015 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28016 name = rs6000_savres_routine_name (regno, sel);
28017 fprintf (file, "\t.extern %s\n", name);
28022 /* Write function prologue. */
28024 static void
28025 rs6000_output_function_prologue (FILE *file)
28027 if (!cfun->is_thunk)
28028 rs6000_output_savres_externs (file);
28030 /* ELFv2 ABI r2 setup code and local entry point. This must follow
28031 immediately after the global entry point label. */
28032 if (rs6000_global_entry_point_needed_p ())
28034 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28036 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
28038 if (TARGET_CMODEL != CMODEL_LARGE)
28040 /* In the small and medium code models, we assume the TOC is less than
28041 2 GB away from the text section, so it can be computed via the
28042 following two-instruction sequence. */
28043 char buf[256];
28045 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28046 fprintf (file, "0:\taddis 2,12,.TOC.-");
28047 assemble_name (file, buf);
28048 fprintf (file, "@ha\n");
28049 fprintf (file, "\taddi 2,2,.TOC.-");
28050 assemble_name (file, buf);
28051 fprintf (file, "@l\n");
28053 else
28055 /* In the large code model, we allow arbitrary offsets between the
28056 TOC and the text section, so we have to load the offset from
28057 memory. The data field is emitted directly before the global
28058 entry point in rs6000_elf_declare_function_name. */
28059 char buf[256];
28061 #ifdef HAVE_AS_ENTRY_MARKERS
28062 /* If supported by the linker, emit a marker relocation. If the
28063 total code size of the final executable or shared library
28064 happens to fit into 2 GB after all, the linker will replace
28065 this code sequence with the sequence for the small or medium
28066 code model. */
28067 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
28068 #endif
28069 fprintf (file, "\tld 2,");
28070 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
28071 assemble_name (file, buf);
28072 fprintf (file, "-");
28073 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28074 assemble_name (file, buf);
28075 fprintf (file, "(12)\n");
28076 fprintf (file, "\tadd 2,2,12\n");
28079 fputs ("\t.localentry\t", file);
28080 assemble_name (file, name);
28081 fputs (",.-", file);
28082 assemble_name (file, name);
28083 fputs ("\n", file);
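/* For reference, the small/medium code model output above reads
   (sketch; the internal label spelling varies by target assembler):
   0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l
	.localentry	name,.-name
   relying on r12 holding the global entry point address on entry.  */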
28086 /* Output -mprofile-kernel code. This needs to be done here instead of
28087 in output_function_profile since it must go after the ELFv2 ABI
28088 local entry point. */
28089 if (TARGET_PROFILE_KERNEL && crtl->profile)
28091 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
28092 gcc_assert (!TARGET_32BIT);
28094 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
28096 /* In the ELFv2 ABI we have no compiler stack word. It must be
28097 the responsibility of _mcount to preserve the static chain
28098 register if required. */
28099 if (DEFAULT_ABI != ABI_ELFv2
28100 && cfun->static_chain_decl != NULL)
28102 asm_fprintf (file, "\tstd %s,24(%s)\n",
28103 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28104 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28105 asm_fprintf (file, "\tld %s,24(%s)\n",
28106 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28108 else
28109 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28112 rs6000_pic_labelno++;
28115 /* -mprofile-kernel code calls mcount before the function prologue,
28116 so a profiled leaf function should stay a leaf function. */
28117 static bool
28118 rs6000_keep_leaf_when_profiled ()
28120 return TARGET_PROFILE_KERNEL;
28123 /* Non-zero if vmx regs are restored before the frame pop, zero if
28124 we restore after the pop when possible. */
28125 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
28127 /* Restoring cr is a two step process: loading a reg from the frame
28128 save, then moving the reg to cr. For ABI_V4 we must let the
28129 unwinder know that the stack location is no longer valid at or
28130 before the stack deallocation, but we can't emit a cfa_restore for
28131 cr at the stack deallocation like we do for other registers.
28132 The trouble is that it is possible for the move to cr to be
28133 scheduled after the stack deallocation. So say exactly where cr
28134 is located on each of the two insns. */
28136 static rtx
28137 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28139 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28140 rtx reg = gen_rtx_REG (SImode, regno);
28141 rtx_insn *insn = emit_move_insn (reg, mem);
28143 if (!exit_func && DEFAULT_ABI == ABI_V4)
28145 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28146 rtx set = gen_rtx_SET (reg, cr);
28148 add_reg_note (insn, REG_CFA_REGISTER, set);
28149 RTX_FRAME_RELATED_P (insn) = 1;
28151 return reg;
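/* The restore is the mirror of the prologue's mfcr/stw pair (sketch):
     lwz   rN,off(1)	# load_cr_save above: reload the saved CR image
     mtcrf mask,rN	# restore_saved_cr below: scatter it to CR fields
   which is why, for ABI_V4, the notes must say where cr lives on both
   instructions, as described above.  */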
28154 /* Reload CR from REG. */
28156 static void
28157 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28159 int count = 0;
28160 int i;
28162 if (using_mfcr_multiple)
28164 for (i = 0; i < 8; i++)
28165 if (save_reg_p (CR0_REGNO + i))
28166 count++;
28167 gcc_assert (count);
28170 if (using_mfcr_multiple && count > 1)
28172 rtx_insn *insn;
28173 rtvec p;
28174 int ndx;
28176 p = rtvec_alloc (count);
28178 ndx = 0;
28179 for (i = 0; i < 8; i++)
28180 if (save_reg_p (CR0_REGNO + i))
28182 rtvec r = rtvec_alloc (2);
28183 RTVEC_ELT (r, 0) = reg;
28184 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28185 RTVEC_ELT (p, ndx) =
28186 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28187 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28188 ndx++;
28190 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28191 gcc_assert (ndx == count);
28193 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28194 CR field separately. */
28195 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28197 for (i = 0; i < 8; i++)
28198 if (save_reg_p (CR0_REGNO + i))
28199 add_reg_note (insn, REG_CFA_RESTORE,
28200 gen_rtx_REG (SImode, CR0_REGNO + i));
28202 RTX_FRAME_RELATED_P (insn) = 1;
28205 else
28206 for (i = 0; i < 8; i++)
28207 if (save_reg_p (CR0_REGNO + i))
28209 rtx insn = emit_insn (gen_movsi_to_cr_one
28210 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28212 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28213 CR field separately, attached to the insn that in fact
28214 restores this particular CR field. */
28215 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28217 add_reg_note (insn, REG_CFA_RESTORE,
28218 gen_rtx_REG (SImode, CR0_REGNO + i));
28220 RTX_FRAME_RELATED_P (insn) = 1;
28224 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28225 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28226 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28228 rtx_insn *insn = get_last_insn ();
28229 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28231 add_reg_note (insn, REG_CFA_RESTORE, cr);
28232 RTX_FRAME_RELATED_P (insn) = 1;
28236 /* Like cr, the move to lr instruction can be scheduled after the
28237 stack deallocation, but unlike cr, its stack frame save is still
28238 valid. So we only need to emit the cfa_restore on the correct
28239 instruction. */
28241 static void
28242 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28244 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28245 rtx reg = gen_rtx_REG (Pmode, regno);
28247 emit_move_insn (reg, mem);
28250 static void
28251 restore_saved_lr (int regno, bool exit_func)
28253 rtx reg = gen_rtx_REG (Pmode, regno);
28254 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28255 rtx_insn *insn = emit_move_insn (lr, reg);
28257 if (!exit_func && flag_shrink_wrap)
28259 add_reg_note (insn, REG_CFA_RESTORE, lr);
28260 RTX_FRAME_RELATED_P (insn) = 1;
28264 static rtx
28265 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28267 if (DEFAULT_ABI == ABI_ELFv2)
28269 int i;
28270 for (i = 0; i < 8; i++)
28271 if (save_reg_p (CR0_REGNO + i))
28273 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28274 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28275 cfa_restores);
28278 else if (info->cr_save_p)
28279 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28280 gen_rtx_REG (SImode, CR2_REGNO),
28281 cfa_restores);
28283 if (info->lr_save_p)
28284 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28285 gen_rtx_REG (Pmode, LR_REGNO),
28286 cfa_restores);
28287 return cfa_restores;
28290 /* Return true if OFFSET from stack pointer can be clobbered by signals.
28291 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
28292 below the stack pointer not clobbered by signals. */
28294 static inline bool
28295 offset_below_red_zone_p (HOST_WIDE_INT offset)
28297 return offset < (DEFAULT_ABI == ABI_V4
28298 ? 0
28299 : TARGET_32BIT ? -220 : -288);
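/* Example: with the 64-bit AIX/ELF red zone of 288 bytes, an offset of
   -288 is still protected (the function returns false), while -289 is
   not; under V.4 any offset below the stack pointer is unsafe.  */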
28302 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28304 static void
28305 emit_cfa_restores (rtx cfa_restores)
28307 rtx_insn *insn = get_last_insn ();
28308 rtx *loc = &REG_NOTES (insn);
28310 while (*loc)
28311 loc = &XEXP (*loc, 1);
28312 *loc = cfa_restores;
28313 RTX_FRAME_RELATED_P (insn) = 1;
28316 /* Emit function epilogue as insns. */
28318 void
28319 rs6000_emit_epilogue (int sibcall)
28321 rs6000_stack_t *info;
28322 int restoring_GPRs_inline;
28323 int restoring_FPRs_inline;
28324 int using_load_multiple;
28325 int using_mtcr_multiple;
28326 int use_backchain_to_restore_sp;
28327 int restore_lr;
28328 int strategy;
28329 HOST_WIDE_INT frame_off = 0;
28330 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28331 rtx frame_reg_rtx = sp_reg_rtx;
28332 rtx cfa_restores = NULL_RTX;
28333 rtx insn;
28334 rtx cr_save_reg = NULL_RTX;
28335 machine_mode reg_mode = Pmode;
28336 int reg_size = TARGET_32BIT ? 4 : 8;
28337 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
28338 ? DFmode : SFmode;
28339 int fp_reg_size = 8;
28340 int i;
28341 bool exit_func;
28342 unsigned ptr_regno;
28344 info = rs6000_stack_info ();
28346 strategy = info->savres_strategy;
28347 using_load_multiple = strategy & REST_MULTIPLE;
28348 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28349 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28350 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28351 || rs6000_tune == PROCESSOR_PPC603
28352 || rs6000_tune == PROCESSOR_PPC750
28353 || optimize_size);
28354 /* Restore via the backchain when we have a large frame, since this
28355 is more efficient than an addis, addi pair. The second condition
28356 here will not trigger at the moment; we don't actually need a
28357 frame pointer for alloca, but the generic parts of the compiler
28358 give us one anyway. */
28359 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28360 ? info->lr_save_offset
28361 : 0) > 32767
28362 || (cfun->calls_alloca
28363 && !frame_pointer_needed));
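/* Compiled-out sketch of the backchain load used here: the word at
   0(r1) is the caller's stack pointer in the PowerPC ABIs, so a single
   load recovers it without materializing a large frame size.  */
#if 0
static unsigned long
load_backchain (const unsigned long *stack_pointer)
{
  return stack_pointer[0];	/* back chain word */
}
#endif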
28364 restore_lr = (info->lr_save_p
28365 && (restoring_FPRs_inline
28366 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28367 && (restoring_GPRs_inline
28368 || info->first_fp_reg_save < 64)
28369 && !cfun->machine->lr_is_wrapped_separately);
28372 if (WORLD_SAVE_P (info))
28374 int i, j;
28375 char rname[30];
28376 const char *alloc_rname;
28377 rtvec p;
28379 /* eh_rest_world_r10 will return to the location saved in the LR
28380 stack slot (which is not likely to be our caller).
28381 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28382 rest_world is similar, except any R10 parameter is ignored.
28383 The exception-handling stuff that was here in 2.95 is no
28384 longer necessary. */
28386 p = rtvec_alloc (9
28387 + 32 - info->first_gp_reg_save
28388 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28389 + 63 + 1 - info->first_fp_reg_save);
28391 strcpy (rname, ((crtl->calls_eh_return) ?
28392 "*eh_rest_world_r10" : "*rest_world"));
28393 alloc_rname = ggc_strdup (rname);
28395 j = 0;
28396 RTVEC_ELT (p, j++) = ret_rtx;
28397 RTVEC_ELT (p, j++)
28398 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28399 /* The instruction pattern requires a clobber here;
28400 it is shared with the restVEC helper. */
28401 RTVEC_ELT (p, j++)
28402 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28405 /* CR register traditionally saved as CR2. */
28406 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28407 RTVEC_ELT (p, j++)
28408 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28409 if (flag_shrink_wrap)
28411 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28412 gen_rtx_REG (Pmode, LR_REGNO),
28413 cfa_restores);
28414 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28418 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28420 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28421 RTVEC_ELT (p, j++)
28422 = gen_frame_load (reg,
28423 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28424 if (flag_shrink_wrap
28425 && save_reg_p (info->first_gp_reg_save + i))
28426 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28428 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28430 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28431 RTVEC_ELT (p, j++)
28432 = gen_frame_load (reg,
28433 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28434 if (flag_shrink_wrap
28435 && save_reg_p (info->first_altivec_reg_save + i))
28436 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28438 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28440 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28441 ? DFmode : SFmode),
28442 info->first_fp_reg_save + i);
28443 RTVEC_ELT (p, j++)
28444 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28445 if (flag_shrink_wrap
28446 && save_reg_p (info->first_fp_reg_save + i))
28447 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28449 RTVEC_ELT (p, j++)
28450 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28451 RTVEC_ELT (p, j++)
28452 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28453 RTVEC_ELT (p, j++)
28454 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28455 RTVEC_ELT (p, j++)
28456 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28457 RTVEC_ELT (p, j++)
28458 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28459 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28461 if (flag_shrink_wrap)
28463 REG_NOTES (insn) = cfa_restores;
28464 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28465 RTX_FRAME_RELATED_P (insn) = 1;
28467 return;
28470 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28471 if (info->push_p)
28472 frame_off = info->total_size;
28474 /* Restore AltiVec registers if we must do so before adjusting the
28475 stack. */
28476 if (info->altivec_size != 0
28477 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28478 || (DEFAULT_ABI != ABI_V4
28479 && offset_below_red_zone_p (info->altivec_save_offset))))
28481 int i;
28482 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28484 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28485 if (use_backchain_to_restore_sp)
28487 int frame_regno = 11;
28489 if ((strategy & REST_INLINE_VRS) == 0)
28491 /* Of r11 and r12, select the one not clobbered by an
28492 out-of-line restore function for the frame register. */
28493 frame_regno = 11 + 12 - scratch_regno;
28495 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28496 emit_move_insn (frame_reg_rtx,
28497 gen_rtx_MEM (Pmode, sp_reg_rtx));
28498 frame_off = 0;
28500 else if (frame_pointer_needed)
28501 frame_reg_rtx = hard_frame_pointer_rtx;
28503 if ((strategy & REST_INLINE_VRS) == 0)
28505 int end_save = info->altivec_save_offset + info->altivec_size;
28506 int ptr_off;
28507 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28508 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28510 if (end_save + frame_off != 0)
28512 rtx offset = GEN_INT (end_save + frame_off);
28514 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28516 else
28517 emit_move_insn (ptr_reg, frame_reg_rtx);
28519 ptr_off = -end_save;
28520 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28521 info->altivec_save_offset + ptr_off,
28522 0, V4SImode, SAVRES_VR);
28524 else
28526 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28527 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28529 rtx addr, areg, mem, insn;
28530 rtx reg = gen_rtx_REG (V4SImode, i);
28531 HOST_WIDE_INT offset
28532 = (info->altivec_save_offset + frame_off
28533 + 16 * (i - info->first_altivec_reg_save));
28535 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28537 mem = gen_frame_mem (V4SImode,
28538 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28539 GEN_INT (offset)));
28540 insn = gen_rtx_SET (reg, mem);
28542 else
28544 areg = gen_rtx_REG (Pmode, 0);
28545 emit_move_insn (areg, GEN_INT (offset));
28547 /* AltiVec addressing mode is [reg+reg]. */
28548 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28549 mem = gen_frame_mem (V4SImode, addr);
28551 /* Rather than emitting a generic move, force use of the
28552 lvx instruction, which we always want. In particular we
28553 don't want lxvd2x/xxpermdi for little endian. */
28554 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28557 (void) emit_insn (insn);
28561 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28562 if (((strategy & REST_INLINE_VRS) == 0
28563 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28564 && (flag_shrink_wrap
28565 || (offset_below_red_zone_p
28566 (info->altivec_save_offset
28567 + 16 * (i - info->first_altivec_reg_save))))
28568 && save_reg_p (i))
28570 rtx reg = gen_rtx_REG (V4SImode, i);
28571 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28575 /* Restore VRSAVE if we must do so before adjusting the stack. */
28576 if (info->vrsave_size != 0
28577 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28578 || (DEFAULT_ABI != ABI_V4
28579 && offset_below_red_zone_p (info->vrsave_save_offset))))
28581 rtx reg;
28583 if (frame_reg_rtx == sp_reg_rtx)
28585 if (use_backchain_to_restore_sp)
28587 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28588 emit_move_insn (frame_reg_rtx,
28589 gen_rtx_MEM (Pmode, sp_reg_rtx));
28590 frame_off = 0;
28592 else if (frame_pointer_needed)
28593 frame_reg_rtx = hard_frame_pointer_rtx;
28596 reg = gen_rtx_REG (SImode, 12);
28597 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28598 info->vrsave_save_offset + frame_off));
28600 emit_insn (generate_set_vrsave (reg, info, 1));
28603 insn = NULL_RTX;
28604 /* If we have a large stack frame, restore the old stack pointer
28605 using the backchain. */
28606 if (use_backchain_to_restore_sp)
28608 if (frame_reg_rtx == sp_reg_rtx)
28610 /* Under V.4, don't reset the stack pointer until after we're done
28611 loading the saved registers. */
28612 if (DEFAULT_ABI == ABI_V4)
28613 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28615 insn = emit_move_insn (frame_reg_rtx,
28616 gen_rtx_MEM (Pmode, sp_reg_rtx));
28617 frame_off = 0;
28619 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28620 && DEFAULT_ABI == ABI_V4)
28621 /* frame_reg_rtx has been set up by the altivec restore. */
28623 else
28625 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28626 frame_reg_rtx = sp_reg_rtx;
28629 /* If we have a frame pointer, we can restore the old stack pointer
28630 from it. */
28631 else if (frame_pointer_needed)
28633 frame_reg_rtx = sp_reg_rtx;
28634 if (DEFAULT_ABI == ABI_V4)
28635 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28636 /* Prevent reordering memory accesses against stack pointer restore. */
28637 else if (cfun->calls_alloca
28638 || offset_below_red_zone_p (-info->total_size))
28639 rs6000_emit_stack_tie (frame_reg_rtx, true);
28641 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28642 GEN_INT (info->total_size)));
28643 frame_off = 0;
28645 else if (info->push_p
28646 && DEFAULT_ABI != ABI_V4
28647 && !crtl->calls_eh_return)
28649 /* Prevent reordering memory accesses against stack pointer restore. */
28650 if (cfun->calls_alloca
28651 || offset_below_red_zone_p (-info->total_size))
28652 rs6000_emit_stack_tie (frame_reg_rtx, false);
28653 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28654 GEN_INT (info->total_size)));
28655 frame_off = 0;
28657 if (insn && frame_reg_rtx == sp_reg_rtx)
28659 if (cfa_restores)
28661 REG_NOTES (insn) = cfa_restores;
28662 cfa_restores = NULL_RTX;
28664 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28665 RTX_FRAME_RELATED_P (insn) = 1;
28668 /* Restore AltiVec registers if we have not done so already. */
28669 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28670 && info->altivec_size != 0
28671 && (DEFAULT_ABI == ABI_V4
28672 || !offset_below_red_zone_p (info->altivec_save_offset)))
28674 int i;
28676 if ((strategy & REST_INLINE_VRS) == 0)
28678 int end_save = info->altivec_save_offset + info->altivec_size;
28679 int ptr_off;
28680 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28681 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28682 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28684 if (end_save + frame_off != 0)
28686 rtx offset = GEN_INT (end_save + frame_off);
28688 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28690 else
28691 emit_move_insn (ptr_reg, frame_reg_rtx);
28693 ptr_off = -end_save;
28694 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28695 info->altivec_save_offset + ptr_off,
28696 0, V4SImode, SAVRES_VR);
28697 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28699 /* Frame reg was clobbered by out-of-line save. Restore it
28700 from ptr_reg, and if we are calling out-of-line gpr or
28701 fpr restore set up the correct pointer and offset. */
28702 unsigned newptr_regno = 1;
28703 if (!restoring_GPRs_inline)
28705 bool lr = info->gp_save_offset + info->gp_size == 0;
28706 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28707 newptr_regno = ptr_regno_for_savres (sel);
28708 end_save = info->gp_save_offset + info->gp_size;
28710 else if (!restoring_FPRs_inline)
28712 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28713 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28714 newptr_regno = ptr_regno_for_savres (sel);
28715 end_save = info->fp_save_offset + info->fp_size;
28718 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28719 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28721 if (end_save + ptr_off != 0)
28723 rtx offset = GEN_INT (end_save + ptr_off);
28725 frame_off = -end_save;
28726 if (TARGET_32BIT)
28727 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28728 ptr_reg, offset));
28729 else
28730 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28731 ptr_reg, offset));
28733 else
28735 frame_off = ptr_off;
28736 emit_move_insn (frame_reg_rtx, ptr_reg);
28740 else
28742 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28743 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28745 rtx addr, areg, mem, insn;
28746 rtx reg = gen_rtx_REG (V4SImode, i);
28747 HOST_WIDE_INT offset
28748 = (info->altivec_save_offset + frame_off
28749 + 16 * (i - info->first_altivec_reg_save));
28751 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28753 mem = gen_frame_mem (V4SImode,
28754 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28755 GEN_INT (offset)));
28756 insn = gen_rtx_SET (reg, mem);
28758 else
28760 areg = gen_rtx_REG (Pmode, 0);
28761 emit_move_insn (areg, GEN_INT (offset));
28763 /* AltiVec addressing mode is [reg+reg]. */
28764 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28765 mem = gen_frame_mem (V4SImode, addr);
28767 /* Rather than emitting a generic move, force use of the
28768 lvx instruction, which we always want. In particular we
28769 don't want lxvd2x/xxpermdi for little endian. */
28770 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28773 (void) emit_insn (insn);
28777 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28778 if (((strategy & REST_INLINE_VRS) == 0
28779 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28780 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28781 && save_reg_p (i))
28783 rtx reg = gen_rtx_REG (V4SImode, i);
28784 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28788 /* Restore VRSAVE if we have not done so already. */
28789 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28790 && info->vrsave_size != 0
28791 && (DEFAULT_ABI == ABI_V4
28792 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28794 rtx reg;
28796 reg = gen_rtx_REG (SImode, 12);
28797 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28798 info->vrsave_save_offset + frame_off));
28800 emit_insn (generate_set_vrsave (reg, info, 1));
28803 /* If we exit by an out-of-line restore function on ABI_V4 then that
28804 function will deallocate the stack, so we don't need to worry
28805 about the unwinder restoring cr from an invalid stack frame
28806 location. */
28807 exit_func = (!restoring_FPRs_inline
28808 || (!restoring_GPRs_inline
28809 && info->first_fp_reg_save == 64));
28811 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28812 *separate* slots if the routine calls __builtin_eh_return, so
28813 that they can be independently restored by the unwinder. */
28814 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28816 int i, cr_off = info->ehcr_offset;
28818 for (i = 0; i < 8; i++)
28819 if (!call_used_regs[CR0_REGNO + i])
28821 rtx reg = gen_rtx_REG (SImode, 0);
28822 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28823 cr_off + frame_off));
28825 insn = emit_insn (gen_movsi_to_cr_one
28826 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28828 if (!exit_func && flag_shrink_wrap)
28830 add_reg_note (insn, REG_CFA_RESTORE,
28831 gen_rtx_REG (SImode, CR0_REGNO + i));
28833 RTX_FRAME_RELATED_P (insn) = 1;
28836 cr_off += reg_size;
28840 /* Get the old lr if we saved it. If we are restoring registers
28841 out-of-line, then the out-of-line routines can do this for us. */
28842 if (restore_lr && restoring_GPRs_inline)
28843 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28845 /* Get the old cr if we saved it. */
28846 if (info->cr_save_p)
28848 unsigned cr_save_regno = 12;
28850 if (!restoring_GPRs_inline)
28852 /* Ensure we don't use the register used by the out-of-line
28853 gpr register restore below. */
28854 bool lr = info->gp_save_offset + info->gp_size == 0;
28855 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28856 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28858 if (gpr_ptr_regno == 12)
28859 cr_save_regno = 11;
28860 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28862 else if (REGNO (frame_reg_rtx) == 12)
28863 cr_save_regno = 11;
28865 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28866 info->cr_save_offset + frame_off,
28867 exit_func);
28870 /* Set LR here to try to overlap restores below. */
28871 if (restore_lr && restoring_GPRs_inline)
28872 restore_saved_lr (0, exit_func);
28874 /* Load exception handler data registers, if needed. */
28875 if (crtl->calls_eh_return)
28877 unsigned int i, regno;
28879 if (TARGET_AIX)
28881 rtx reg = gen_rtx_REG (reg_mode, 2);
28882 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28883 frame_off + RS6000_TOC_SAVE_SLOT));
28886 for (i = 0; ; ++i)
28888 rtx mem;
28890 regno = EH_RETURN_DATA_REGNO (i);
28891 if (regno == INVALID_REGNUM)
28892 break;
28894 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28895 info->ehrd_offset + frame_off
28896 + reg_size * (int) i);
28898 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28902 /* Restore GPRs. This is done as a PARALLEL if we are using
28903 the load-multiple instructions. */
28904 if (!restoring_GPRs_inline)
28906 /* We are jumping to an out-of-line function. */
28907 rtx ptr_reg;
28908 int end_save = info->gp_save_offset + info->gp_size;
28909 bool can_use_exit = end_save == 0;
28910 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28911 int ptr_off;
28913 /* Emit stack reset code if we need it. */
28914 ptr_regno = ptr_regno_for_savres (sel);
28915 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28916 if (can_use_exit)
28917 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28918 else if (end_save + frame_off != 0)
28919 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28920 GEN_INT (end_save + frame_off)));
28921 else if (REGNO (frame_reg_rtx) != ptr_regno)
28922 emit_move_insn (ptr_reg, frame_reg_rtx);
28923 if (REGNO (frame_reg_rtx) == ptr_regno)
28924 frame_off = -end_save;
28926 if (can_use_exit && info->cr_save_p)
28927 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28929 ptr_off = -end_save;
28930 rs6000_emit_savres_rtx (info, ptr_reg,
28931 info->gp_save_offset + ptr_off,
28932 info->lr_save_offset + ptr_off,
28933 reg_mode, sel);
28935 else if (using_load_multiple)
28937 rtvec p;
28938 p = rtvec_alloc (32 - info->first_gp_reg_save);
28939 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28940 RTVEC_ELT (p, i)
28941 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28942 frame_reg_rtx,
28943 info->gp_save_offset + frame_off + reg_size * i);
28944 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28946 else
28948 int offset = info->gp_save_offset + frame_off;
28949 for (i = info->first_gp_reg_save; i < 32; i++)
28951 if (save_reg_p (i)
28952 && !cfun->machine->gpr_is_wrapped_separately[i])
28954 rtx reg = gen_rtx_REG (reg_mode, i);
28955 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28958 offset += reg_size;
28962 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28964 /* If the frame pointer was used then we can't delay emitting
28965 a REG_CFA_DEF_CFA note. This must happen on the insn that
28966 restores the frame pointer, r31. We may have already emitted
28967 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28968 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28969 be harmless if emitted. */
28970 if (frame_pointer_needed)
28972 insn = get_last_insn ();
28973 add_reg_note (insn, REG_CFA_DEF_CFA,
28974 plus_constant (Pmode, frame_reg_rtx, frame_off));
28975 RTX_FRAME_RELATED_P (insn) = 1;
28978 /* Set up cfa_restores. We always need these when
28979 shrink-wrapping. If not shrink-wrapping then we only need
28980 the cfa_restore when the stack location is no longer valid.
28981 The cfa_restores must be emitted on or before the insn that
28982 invalidates the stack, and of course must not be emitted
28983 before the insn that actually does the restore. The latter
28984 is why it is a bad idea to emit the cfa_restores as a group
28985 on the last instruction here that actually does a restore:
28986 That insn may be reordered with respect to others doing
28987 restores. */
28988 if (flag_shrink_wrap
28989 && !restoring_GPRs_inline
28990 && info->first_fp_reg_save == 64)
28991 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28993 for (i = info->first_gp_reg_save; i < 32; i++)
28994 if (save_reg_p (i)
28995 && !cfun->machine->gpr_is_wrapped_separately[i])
28997 rtx reg = gen_rtx_REG (reg_mode, i);
28998 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29002 if (!restoring_GPRs_inline
29003 && info->first_fp_reg_save == 64)
29005 /* We are jumping to an out-of-line function. */
29006 if (cfa_restores)
29007 emit_cfa_restores (cfa_restores);
29008 return;
29011 if (restore_lr && !restoring_GPRs_inline)
29013 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
29014 restore_saved_lr (0, exit_func);
29017 /* Restore fpr's if we need to do it without calling a function. */
29018 if (restoring_FPRs_inline)
29020 int offset = info->fp_save_offset + frame_off;
29021 for (i = info->first_fp_reg_save; i < 64; i++)
29023 if (save_reg_p (i)
29024 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
29026 rtx reg = gen_rtx_REG (fp_reg_mode, i);
29027 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
29028 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
29029 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
29030 cfa_restores);
29033 offset += fp_reg_size;
29037 /* If we saved cr, restore it here. Just those that were used. */
29038 if (info->cr_save_p)
29039 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
29041 /* If this is V.4, unwind the stack pointer after all of the loads
29042 have been done, or set up r11 if we are restoring fp out of line. */
29043 ptr_regno = 1;
29044 if (!restoring_FPRs_inline)
29046 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29047 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
29048 ptr_regno = ptr_regno_for_savres (sel);
29051 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
29052 if (REGNO (frame_reg_rtx) == ptr_regno)
29053 frame_off = 0;
29055 if (insn && restoring_FPRs_inline)
29057 if (cfa_restores)
29059 REG_NOTES (insn) = cfa_restores;
29060 cfa_restores = NULL_RTX;
29062 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
29063 RTX_FRAME_RELATED_P (insn) = 1;
29066 if (crtl->calls_eh_return)
29068 rtx sa = EH_RETURN_STACKADJ_RTX;
29069 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
29072 if (!sibcall && restoring_FPRs_inline)
29074 if (cfa_restores)
29076 /* We can't hang the cfa_restores off a simple return,
29077 since the shrink-wrap code sometimes uses an existing
29078 return. This means there might be a path from
29079 pre-prologue code to this return, and dwarf2cfi code
29080 wants the eh_frame unwinder state to be the same on
29081 all paths to any point. So we need to emit the
29082 cfa_restores before the return. For -m64 we really
29083 don't need epilogue cfa_restores at all, except for
29084 this irritating dwarf2cfi shrink-wrap
29085 requirement; the stack red-zone means eh_frame info
29086 from the prologue telling the unwinder to restore
29087 from the stack is perfectly good right to the end of
29088 the function. */
29089 emit_insn (gen_blockage ());
29090 emit_cfa_restores (cfa_restores);
29091 cfa_restores = NULL_RTX;
29094 emit_jump_insn (targetm.gen_simple_return ());
29097 if (!sibcall && !restoring_FPRs_inline)
29099 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29100 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
29101 int elt = 0;
29102 RTVEC_ELT (p, elt++) = ret_rtx;
29103 if (lr)
29104 RTVEC_ELT (p, elt++)
29105 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29107 /* We have to restore more than two FP registers, so branch to the
29108 restore function. It will return to our caller. */
29109 int i;
29110 int reg;
29111 rtx sym;
29113 if (flag_shrink_wrap)
29114 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
29116 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
29117 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
29118 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
29119 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
29121 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29123 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
29125 RTVEC_ELT (p, elt++)
29126 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
29127 if (flag_shrink_wrap
29128 && save_reg_p (info->first_fp_reg_save + i))
29129 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29132 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
29135 if (cfa_restores)
29137 if (sibcall)
29138 /* Ensure the cfa_restores are hung off an insn that won't
29139 be reordered above other restores. */
29140 emit_insn (gen_blockage ());
29142 emit_cfa_restores (cfa_restores);
29146 /* Write function epilogue. */
29148 static void
29149 rs6000_output_function_epilogue (FILE *file)
29151 #if TARGET_MACHO
29152 macho_branch_islands ();
29155 rtx_insn *insn = get_last_insn ();
29156 rtx_insn *deleted_debug_label = NULL;
29158 /* Mach-O doesn't support labels at the end of objects, so if
29159 it looks like we might want one, take special action.
29161 First, collect any sequence of deleted debug labels. */
29162 while (insn
29163 && NOTE_P (insn)
29164 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29166 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
29167 notes only, instead set their CODE_LABEL_NUMBER to -1,
29168 otherwise there would be code generation differences
29169 in between -g and -g0. */
29170 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29171 deleted_debug_label = insn;
29172 insn = PREV_INSN (insn);
29175 /* Second, if we have:
29176 label:
29177 barrier
29178 then this needs to be detected, so skip past the barrier. */
29180 if (insn && BARRIER_P (insn))
29181 insn = PREV_INSN (insn);
29183 /* Up to now we've only seen notes or barriers. */
29184 if (insn)
29186 if (LABEL_P (insn)
29187 || (NOTE_P (insn)
29188 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29189 /* Trailing label: <barrier>. */
29190 fputs ("\tnop\n", file);
29191 else
29193 /* Lastly, see if we have a completely empty function body. */
29194 while (insn && ! INSN_P (insn))
29195 insn = PREV_INSN (insn);
29196 /* If we don't find any insns, we've got an empty function body;
29197 i.e. completely empty - without a return or branch. This is
29198 taken as the case where a function body has been removed
29199 because it contains an inline __builtin_unreachable(). GCC
29200 states that reaching __builtin_unreachable() means undefined
29201 behavior, so we're not obliged to do anything special; however,
29202 we want non-zero-sized function bodies. To meet this, and to help
29203 the user out, let's trap the case. */
29204 if (insn == NULL)
29205 fputs ("\ttrap\n", file);
29208 else if (deleted_debug_label)
29209 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29210 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29211 CODE_LABEL_NUMBER (insn) = -1;
29213 #endif
29215 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29216 on its format.
29218 We don't output a traceback table if -finhibit-size-directive was
29219 used. The documentation for -finhibit-size-directive reads
29220 ``don't output a @code{.size} assembler directive, or anything
29221 else that would cause trouble if the function is split in the
29222 middle, and the two halves are placed at locations far apart in
29223 memory.'' The traceback table has this property, since it
29224 includes the offset from the start of the function to the
29225 traceback table itself.
29227 System V.4 PowerPC (and the embedded ABI derived from it) uses a
29228 different traceback table. */
29229 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29230 && ! flag_inhibit_size_directive
29231 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29233 const char *fname = NULL;
29234 const char *language_string = lang_hooks.name;
29235 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29236 int i;
29237 int optional_tbtab;
29238 rs6000_stack_t *info = rs6000_stack_info ();
29240 if (rs6000_traceback == traceback_full)
29241 optional_tbtab = 1;
29242 else if (rs6000_traceback == traceback_part)
29243 optional_tbtab = 0;
29244 else
29245 optional_tbtab = !optimize_size && !TARGET_ELF;
29247 if (optional_tbtab)
29249 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29250 while (*fname == '.') /* V.4 encodes . in the name */
29251 fname++;
29253 /* Need label immediately before tbtab, so we can compute
29254 its offset from the function start. */
29255 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29256 ASM_OUTPUT_LABEL (file, fname);
29259 /* The .tbtab pseudo-op can only be used for the first eight
29260 expressions, since it can't handle the possibly variable
29261 length fields that follow. However, if you omit the optional
29262 fields, the assembler outputs zeros for all optional fields
29263 anyway, giving each variable length field its minimum length
29264 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29265 pseudo-op at all. */
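 /* Illustrative sketch only (not built, and an assumption based on the
 bytes emitted below rather than on the authoritative sys/debug.h
 definition): the mandatory portion of the traceback table written
 out here corresponds roughly to the following layout. The struct
 and field names are invented. */
#if 0
 struct tbtab_fixed_sketch
 {
 unsigned int zero; /* All-zero word flagging the tbtab. */
 unsigned char version; /* Format type; always 0 here. */
 unsigned char lang; /* Language code (C = 0, C++ = 9, ...). */
 unsigned char flags1; /* 8 single-bit fields. */
 unsigned char flags2; /* 6 bitfields, incl. cr/lr save. */
 unsigned char flags3; /* Backchain/fixup bits + fpr save count. */
 unsigned char flags4; /* Spare bits + gpr save count. */
 unsigned char fixed_parms; /* Words of fixed-point parameters. */
 unsigned char flags5; /* Fp parm count + parms-on-stack bit. */
 };
#endif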
29267 /* An all-zero word flags the start of the tbtab, for debuggers
29268 that have to find it by searching forward from the entry
29269 point or from the current pc. */
29270 fputs ("\t.long 0\n", file);
29272 /* Tbtab format type. Use format type 0. */
29273 fputs ("\t.byte 0,", file);
29275 /* Language type. Unfortunately, there does not seem to be any
29276 official way to discover the language being compiled, so we
29277 use language_string.
29278 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
29279 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29280 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
29281 either, so for now use 0. */
29282 if (lang_GNU_C ()
29283 || ! strcmp (language_string, "GNU GIMPLE")
29284 || ! strcmp (language_string, "GNU Go")
29285 || ! strcmp (language_string, "libgccjit"))
29286 i = 0;
29287 else if (! strcmp (language_string, "GNU F77")
29288 || lang_GNU_Fortran ())
29289 i = 1;
29290 else if (! strcmp (language_string, "GNU Pascal"))
29291 i = 2;
29292 else if (! strcmp (language_string, "GNU Ada"))
29293 i = 3;
29294 else if (lang_GNU_CXX ()
29295 || ! strcmp (language_string, "GNU Objective-C++"))
29296 i = 9;
29297 else if (! strcmp (language_string, "GNU Java"))
29298 i = 13;
29299 else if (! strcmp (language_string, "GNU Objective-C"))
29300 i = 14;
29301 else
29302 gcc_unreachable ();
29303 fprintf (file, "%d,", i);
29305 /* 8 single bit fields: global linkage (not set for C extern linkage,
29306 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29307 from start of procedure stored in tbtab, internal function, function
29308 has controlled storage, function has no toc, function uses fp,
29309 function logs/aborts fp operations. */
29310 /* Assume that fp operations are used if any fp reg must be saved. */
29311 fprintf (file, "%d,",
29312 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29314 /* 6 bitfields: function is interrupt handler, name present in
29315 proc table, function calls alloca, on condition directives
29316 (controls stack walks, 3 bits), saves condition reg, saves
29317 link reg. */
29318 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29319 set up as a frame pointer, even when there is no alloca call. */
29320 fprintf (file, "%d,",
29321 ((optional_tbtab << 6)
29322 | ((optional_tbtab & frame_pointer_needed) << 5)
29323 | (info->cr_save_p << 1)
29324 | (info->lr_save_p)));
29326 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29327 (6 bits). */
29328 fprintf (file, "%d,",
29329 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29331 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29332 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29334 if (optional_tbtab)
29336 /* Compute the parameter info from the function decl argument
29337 list. */
29338 tree decl;
29339 int next_parm_info_bit = 31;
29341 for (decl = DECL_ARGUMENTS (current_function_decl);
29342 decl; decl = DECL_CHAIN (decl))
29344 rtx parameter = DECL_INCOMING_RTL (decl);
29345 machine_mode mode = GET_MODE (parameter);
29347 if (GET_CODE (parameter) == REG)
29349 if (SCALAR_FLOAT_MODE_P (mode))
29351 int bits;
29353 float_parms++;
29355 switch (mode)
29357 case E_SFmode:
29358 case E_SDmode:
29359 bits = 0x2;
29360 break;
29362 case E_DFmode:
29363 case E_DDmode:
29364 case E_TFmode:
29365 case E_TDmode:
29366 case E_IFmode:
29367 case E_KFmode:
29368 bits = 0x3;
29369 break;
29371 default:
29372 gcc_unreachable ();
29375 /* If only one bit will fit, don't or in this entry. */
29376 if (next_parm_info_bit > 0)
29377 parm_info |= (bits << (next_parm_info_bit - 1));
29378 next_parm_info_bit -= 2;
29380 else
29382 fixed_parms += ((GET_MODE_SIZE (mode)
29383 + (UNITS_PER_WORD - 1))
29384 / UNITS_PER_WORD);
29385 next_parm_info_bit -= 1;
29391 /* Number of fixed point parameters. */
29392 /* This is actually the number of words of fixed point parameters; thus
29393 an 8 byte struct counts as 2; and thus the maximum value is 8. */
29394 fprintf (file, "%d,", fixed_parms);
29396 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29397 all on stack. */
29398 /* This is actually the number of fp registers that hold parameters;
29399 and thus the maximum value is 13. */
29400 /* Set parameters on stack bit if parameters are not in their original
29401 registers, regardless of whether they are on the stack? Xlc
29402 seems to set the bit when not optimizing. */
29403 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29405 if (optional_tbtab)
29407 /* Optional fields follow. Some are variable length. */
29409 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29410 float, 11 double float. */
29411 /* There is an entry for each parameter in a register, in the order
29412 that they occur in the parameter list. Any intervening arguments
29413 on the stack are ignored. If the list overflows a long (max
29414 possible length 34 bits) then completely leave off all elements
29415 that don't fit. */
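 /* Worked example (illustrative): for a function whose register
 parameters are (int, double, float), the packing loop above leaves
 bit 31 clear for the int (one word, "0"), sets "11" at bits 30-29
 for the double, and "10" at bits 28-27 for the float, giving
 parm_info == 0x70000000. */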
29416 /* Only emit this long if there was at least one parameter. */
29417 if (fixed_parms || float_parms)
29418 fprintf (file, "\t.long %d\n", parm_info);
29420 /* Offset from start of code to tb table. */
29421 fputs ("\t.long ", file);
29422 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29423 RS6000_OUTPUT_BASENAME (file, fname);
29424 putc ('-', file);
29425 rs6000_output_function_entry (file, fname);
29426 putc ('\n', file);
29428 /* Interrupt handler mask. */
29429 /* Omit this long, since we never set the interrupt handler bit
29430 above. */
29432 /* Number of CTL (controlled storage) anchors. */
29433 /* Omit this long, since the has_ctl bit is never set above. */
29435 /* Displacement into stack of each CTL anchor. */
29436 /* Omit this list of longs, because there are no CTL anchors. */
29438 /* Length of function name. */
29439 if (*fname == '*')
29440 ++fname;
29441 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29443 /* Function name. */
29444 assemble_string (fname, strlen (fname));
29446 /* Register for alloca automatic storage; this is always reg 31.
29447 Only emit this if the alloca bit was set above. */
29448 if (frame_pointer_needed)
29449 fputs ("\t.byte 31\n", file);
29451 fputs ("\t.align 2\n", file);
29455 /* Arrange to define .LCTOC1 label, if not already done. */
29456 if (need_toc_init)
29458 need_toc_init = 0;
29459 if (!toc_initialized)
29461 switch_to_section (toc_section);
29462 switch_to_section (current_function_section ());
29467 /* -fsplit-stack support. */
29469 /* A SYMBOL_REF for __morestack. */
29470 static GTY(()) rtx morestack_ref;
29472 static rtx
29473 gen_add3_const (rtx rt, rtx ra, long c)
29475 if (TARGET_64BIT)
29476 return gen_adddi3 (rt, ra, GEN_INT (c));
29477 else
29478 return gen_addsi3 (rt, ra, GEN_INT (c));
29481 /* Emit -fsplit-stack prologue, which goes before the regular function
29482 prologue (at local entry point in the case of ELFv2). */
29484 void
29485 rs6000_expand_split_stack_prologue (void)
29487 rs6000_stack_t *info = rs6000_stack_info ();
29488 unsigned HOST_WIDE_INT allocate;
29489 long alloc_hi, alloc_lo;
29490 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29491 rtx_insn *insn;
29493 gcc_assert (flag_split_stack && reload_completed);
29495 if (!info->push_p)
29496 return;
29498 if (global_regs[29])
29500 error ("%qs uses register r29", "-fsplit-stack");
29501 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29502 "conflicts with %qD", global_regs_decl[29]);
29505 allocate = info->total_size;
29506 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29508 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29509 return;
29511 if (morestack_ref == NULL_RTX)
29513 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29514 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29515 | SYMBOL_FLAG_FUNCTION);
29518 r0 = gen_rtx_REG (Pmode, 0);
29519 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29520 r12 = gen_rtx_REG (Pmode, 12);
29521 emit_insn (gen_load_split_stack_limit (r0));
29522 /* Always emit two insns here to calculate the requested stack,
29523 so that the linker can edit them when adjusting size for calling
29524 non-split-stack code. */
29525 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29526 alloc_lo = -allocate - alloc_hi;
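 /* Worked example (illustrative): for allocate == 0x12345, the two
 statements above give alloc_hi == -0x10000 and alloc_lo == -0x2345,
 so alloc_hi + alloc_lo == -allocate and alloc_lo fits in the signed
 16-bit immediate field of an add-immediate instruction. */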
29527 if (alloc_hi != 0)
29529 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29530 if (alloc_lo != 0)
29531 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29532 else
29533 emit_insn (gen_nop ());
29535 else
29537 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29538 emit_insn (gen_nop ());
29541 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29542 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29543 ok_label = gen_label_rtx ();
29544 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29545 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29546 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29547 pc_rtx);
29548 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29549 JUMP_LABEL (insn) = ok_label;
29550 /* Mark the jump as very likely to be taken. */
29551 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29553 lr = gen_rtx_REG (Pmode, LR_REGNO);
29554 insn = emit_move_insn (r0, lr);
29555 RTX_FRAME_RELATED_P (insn) = 1;
29556 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29557 RTX_FRAME_RELATED_P (insn) = 1;
29559 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29560 const0_rtx, const0_rtx));
29561 call_fusage = NULL_RTX;
29562 use_reg (&call_fusage, r12);
29563 /* Say the call uses r0, even though it doesn't, to stop regrename
29564 from twiddling with the insns saving lr, trashing args for cfun.
29565 The insns restoring lr are similarly protected by making
29566 split_stack_return use r0. */
29567 use_reg (&call_fusage, r0);
29568 add_function_usage_to (insn, call_fusage);
29569 /* Indicate that this function can't jump to non-local gotos. */
29570 make_reg_eh_region_note_nothrow_nononlocal (insn);
29571 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29572 insn = emit_move_insn (lr, r0);
29573 add_reg_note (insn, REG_CFA_RESTORE, lr);
29574 RTX_FRAME_RELATED_P (insn) = 1;
29575 emit_insn (gen_split_stack_return ());
29577 emit_label (ok_label);
29578 LABEL_NUSES (ok_label) = 1;
29581 /* Return the internal arg pointer used for function incoming
29582 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29583 to copy it to a pseudo in order for it to be preserved over calls
29584 and suchlike. We'd really like to use a pseudo here for the
29585 internal arg pointer but data-flow analysis is not prepared to
29586 accept pseudos as live at the beginning of a function. */
29588 static rtx
29589 rs6000_internal_arg_pointer (void)
29591 if (flag_split_stack
29592 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29593 == NULL))
29596 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29598 rtx pat;
29600 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29601 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29603 /* Put the pseudo initialization right after the note at the
29604 beginning of the function. */
29605 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29606 gen_rtx_REG (Pmode, 12));
29607 push_topmost_sequence ();
29608 emit_insn_after (pat, get_insns ());
29609 pop_topmost_sequence ();
29611 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29612 FIRST_PARM_OFFSET (current_function_decl));
29613 return copy_to_reg (ret);
29615 return virtual_incoming_args_rtx;
29618 /* We may have to tell the dataflow pass that the split stack prologue
29619 is initializing a register. */
29621 static void
29622 rs6000_live_on_entry (bitmap regs)
29624 if (flag_split_stack)
29625 bitmap_set_bit (regs, 12);
29628 /* Emit -fsplit-stack dynamic stack allocation space check. */
29630 void
29631 rs6000_split_stack_space_check (rtx size, rtx label)
29633 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29634 rtx limit = gen_reg_rtx (Pmode);
29635 rtx requested = gen_reg_rtx (Pmode);
29636 rtx cmp = gen_reg_rtx (CCUNSmode);
29637 rtx jump;
29639 emit_insn (gen_load_split_stack_limit (limit));
29640 if (CONST_INT_P (size))
29641 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29642 else
29644 size = force_reg (Pmode, size);
29645 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29647 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29648 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29649 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29650 gen_rtx_LABEL_REF (VOIDmode, label),
29651 pc_rtx);
29652 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29653 JUMP_LABEL (jump) = label;
29656 /* A C compound statement that outputs the assembler code for a thunk
29657 function, used to implement C++ virtual function calls with
29658 multiple inheritance. The thunk acts as a wrapper around a virtual
29659 function, adjusting the implicit object parameter before handing
29660 control off to the real function.
29662 First, emit code to add the integer DELTA to the location that
29663 contains the incoming first argument. Assume that this argument
29664 contains a pointer, and is the one used to pass the `this' pointer
29665 in C++. This is the incoming argument *before* the function
29666 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29667 values of all other incoming arguments.
29669 After the addition, emit code to jump to FUNCTION, which is a
29670 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29671 not touch the return address. Hence returning from FUNCTION will
29672 return to whoever called the current `thunk'.
29674 The effect must be as if FUNCTION had been called directly with the
29675 adjusted first argument. This macro is responsible for emitting
29676 all of the code for a thunk function; output_function_prologue()
29677 and output_function_epilogue() are not invoked.
29679 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29680 been extracted from it.) It might possibly be useful on some
29681 targets, but probably not.
29683 If you do not define this macro, the target-independent code in the
29684 C++ frontend will generate a less efficient heavyweight thunk that
29685 calls FUNCTION instead of jumping to it. The generic approach does
29686 not support varargs. */
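 /* Illustrative sketch (an assumption about the typical output, not a
 quotation of it): for a thunk with a small DELTA, no VCALL_OFFSET,
 and a non-aggregate-returning target, the code below boils down to

 addi r3,r3,DELTA # adjust the incoming `this' pointer
 b FUNCTION # tail-jump; FUNCTION returns to our caller

 With a nonzero VCALL_OFFSET, loads of the vtable pointer and of the
 offset word through r12 are inserted between the two. */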
29688 static void
29689 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29690 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29691 tree function)
29693 rtx this_rtx, funexp;
29694 rtx_insn *insn;
29696 reload_completed = 1;
29697 epilogue_completed = 1;
29699 /* Mark the end of the (empty) prologue. */
29700 emit_note (NOTE_INSN_PROLOGUE_END);
29702 /* Find the "this" pointer. If the function returns a structure,
29703 the structure return pointer is in r3. */
29704 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29705 this_rtx = gen_rtx_REG (Pmode, 4);
29706 else
29707 this_rtx = gen_rtx_REG (Pmode, 3);
29709 /* Apply the constant offset, if required. */
29710 if (delta)
29711 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29713 /* Apply the offset from the vtable, if required. */
29714 if (vcall_offset)
29716 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29717 rtx tmp = gen_rtx_REG (Pmode, 12);
29719 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29720 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29722 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29723 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29725 else
29727 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29729 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29731 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29734 /* Generate a tail call to the target function. */
29735 if (!TREE_USED (function))
29737 assemble_external (function);
29738 TREE_USED (function) = 1;
29740 funexp = XEXP (DECL_RTL (function), 0);
29741 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29743 #if TARGET_MACHO
29744 if (MACHOPIC_INDIRECT)
29745 funexp = machopic_indirect_call_target (funexp);
29746 #endif
29748 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29749 generate sibcall RTL explicitly. */
29750 insn = emit_call_insn (
29751 gen_rtx_PARALLEL (VOIDmode,
29752 gen_rtvec (3,
29753 gen_rtx_CALL (VOIDmode,
29754 funexp, const0_rtx),
29755 gen_rtx_USE (VOIDmode, const0_rtx),
29756 simple_return_rtx)));
29757 SIBLING_CALL_P (insn) = 1;
29758 emit_barrier ();
29760 /* Run just enough of rest_of_compilation to get the insns emitted.
29761 There's not really enough bulk here to make other passes such as
29762 instruction scheduling worthwhile. Note that use_thunk calls
29763 assemble_start_function and assemble_end_function. */
29764 insn = get_insns ();
29765 shorten_branches (insn);
29766 final_start_function (insn, file, 1);
29767 final (insn, file, 1);
29768 final_end_function ();
29770 reload_completed = 0;
29771 epilogue_completed = 0;
29774 /* A quick summary of the various types of 'constant-pool tables'
29775 under PowerPC:
29777 Target Flags Name One table per
29778 AIX (none) AIX TOC object file
29779 AIX -mfull-toc AIX TOC object file
29780 AIX -mminimal-toc AIX minimal TOC translation unit
29781 SVR4/EABI (none) SVR4 SDATA object file
29782 SVR4/EABI -fpic SVR4 pic object file
29783 SVR4/EABI -fPIC SVR4 PIC translation unit
29784 SVR4/EABI -mrelocatable EABI TOC function
29785 SVR4/EABI -maix AIX TOC object file
29786 SVR4/EABI -maix -mminimal-toc
29787 AIX minimal TOC translation unit
29789 Name Reg. Set by entries contains:
29790 made by addrs? fp? sum?
29792 AIX TOC 2 crt0 as Y option option
29793 AIX minimal TOC 30 prolog gcc Y Y option
29794 SVR4 SDATA 13 crt0 gcc N Y N
29795 SVR4 pic 30 prolog ld Y not yet N
29796 SVR4 PIC 30 prolog gcc Y option option
29797 EABI TOC 30 prolog gcc Y option option
29801 /* Hash functions for the hash table. */
29803 static unsigned
29804 rs6000_hash_constant (rtx k)
29806 enum rtx_code code = GET_CODE (k);
29807 machine_mode mode = GET_MODE (k);
29808 unsigned result = (code << 3) ^ mode;
29809 const char *format;
29810 int flen, fidx;
29812 format = GET_RTX_FORMAT (code);
29813 flen = strlen (format);
29814 fidx = 0;
29816 switch (code)
29818 case LABEL_REF:
29819 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29821 case CONST_WIDE_INT:
29823 int i;
29824 flen = CONST_WIDE_INT_NUNITS (k);
29825 for (i = 0; i < flen; i++)
29826 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29827 return result;
29830 case CONST_DOUBLE:
29831 if (mode != VOIDmode)
29832 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29833 flen = 2;
29834 break;
29836 case CODE_LABEL:
29837 fidx = 3;
29838 break;
29840 default:
29841 break;
29844 for (; fidx < flen; fidx++)
29845 switch (format[fidx])
29847 case 's':
29849 unsigned i, len;
29850 const char *str = XSTR (k, fidx);
29851 len = strlen (str);
29852 result = result * 613 + len;
29853 for (i = 0; i < len; i++)
29854 result = result * 613 + (unsigned) str[i];
29855 break;
29857 case 'u':
29858 case 'e':
29859 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29860 break;
29861 case 'i':
29862 case 'n':
29863 result = result * 613 + (unsigned) XINT (k, fidx);
29864 break;
29865 case 'w':
29866 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29867 result = result * 613 + (unsigned) XWINT (k, fidx);
29868 else
29870 size_t i;
29871 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29872 result = result * 613 + (unsigned) (XWINT (k, fidx)
29873 >> CHAR_BIT * i);
29875 break;
29876 case '0':
29877 break;
29878 default:
29879 gcc_unreachable ();
29882 return result;
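 /* Illustrative sketch (not built): the multiplicative fold that
 rs6000_hash_constant applies to string operands, shown standalone.
 The multiplier 613 matches the arbitrary odd constant used for the
 's' case above; the function name is invented. */
#if 0
static unsigned
rs6000_hash_string_sketch (const char *str)
{
  unsigned len = (unsigned) strlen (str);
  unsigned result = 0;

  /* Fold in the length first, then each character, exactly as the
     's' case of rs6000_hash_constant does. */
  result = result * 613 + len;
  for (unsigned i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif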
29885 hashval_t
29886 toc_hasher::hash (toc_hash_struct *thc)
29888 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29891 /* Compare H1 and H2 for equivalence. */
29893 bool
29894 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29896 rtx r1 = h1->key;
29897 rtx r2 = h2->key;
29899 if (h1->key_mode != h2->key_mode)
29900 return 0;
29902 return rtx_equal_p (r1, r2);
29905 /* These are the names given by the C++ front-end to vtables, and
29906 vtable-like objects. Ideally, this logic should not be here;
29907 instead, there should be some programmatic way of inquiring as
29908 to whether or not an object is a vtable. */
29910 #define VTABLE_NAME_P(NAME) \
29911 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
29912 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
29913 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
29914 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
29915 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
29917 #ifdef NO_DOLLAR_IN_LABEL
29918 /* Return a GGC-allocated character string translating dollar signs in
29919 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29921 const char *
29922 rs6000_xcoff_strip_dollar (const char *name)
29924 char *strip, *p;
29925 const char *q;
29926 size_t len;
29928 q = (const char *) strchr (name, '$');
29930 if (q == 0 || q == name)
29931 return name;
29933 len = strlen (name);
29934 strip = XALLOCAVEC (char, len + 1);
29935 strcpy (strip, name);
29936 p = strip + (q - name);
29937 while (p)
29939 *p = '_';
29940 p = strchr (p + 1, '$');
29943 return ggc_alloc_string (strip, len);
29945 #endif
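 /* Usage example (illustrative): rs6000_xcoff_strip_dollar ("foo$bar$")
 returns "foo_bar_"; a name with no '$', or whose only '$' is the
 leading character, is returned unchanged. */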
29947 void
29948 rs6000_output_symbol_ref (FILE *file, rtx x)
29950 const char *name = XSTR (x, 0);
29952 /* Currently C++ toc references to vtables can be emitted before it
29953 is decided whether the vtable is public or private. If this is
29954 the case, then the linker will eventually complain that there is
29955 a reference to an unknown section. Thus, for vtables only,
29956 we emit the TOC reference to reference the identifier and not the
29957 symbol. */
29958 if (VTABLE_NAME_P (name))
29960 RS6000_OUTPUT_BASENAME (file, name);
29962 else
29963 assemble_name (file, name);
29966 /* Output a TOC entry. We derive the entry name from what is being
29967 written. */
29969 void
29970 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29972 char buf[256];
29973 const char *name = buf;
29974 rtx base = x;
29975 HOST_WIDE_INT offset = 0;
29977 gcc_assert (!TARGET_NO_TOC);
29979 /* When the linker won't eliminate them, don't output duplicate
29980 TOC entries (this happens on AIX if there is any kind of TOC,
29981 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29982 CODE_LABELs. */
29983 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29985 struct toc_hash_struct *h;
29987 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29988 time because GGC is not initialized at that point. */
29989 if (toc_hash_table == NULL)
29990 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29992 h = ggc_alloc<toc_hash_struct> ();
29993 h->key = x;
29994 h->key_mode = mode;
29995 h->labelno = labelno;
29997 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29998 if (*found == NULL)
29999 *found = h;
30000 else /* This is indeed a duplicate.
30001 Set this label equal to that label. */
30003 fputs ("\t.set ", file);
30004 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
30005 fprintf (file, "%d,", labelno);
30006 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
30007 fprintf (file, "%d\n", ((*found)->labelno));
30009 #ifdef HAVE_AS_TLS
30010 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
30011 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
30012 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
30014 fputs ("\t.set ", file);
30015 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
30016 fprintf (file, "%d,", labelno);
30017 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
30018 fprintf (file, "%d\n", ((*found)->labelno));
30020 #endif
30021 return;
30025 /* If we're going to put a double constant in the TOC, make sure it's
30026 aligned properly when strict alignment is on. */
30027 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
30028 && STRICT_ALIGNMENT
30029 && GET_MODE_BITSIZE (mode) >= 64
30030 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
30031 ASM_OUTPUT_ALIGN (file, 3);
30034 (*targetm.asm_out.internal_label) (file, "LC", labelno);
30036 /* Handle FP constants specially. Note that if we have a minimal
30037 TOC, things we put here aren't actually in the TOC, so we can allow
30038 FP constants. */
30039 if (GET_CODE (x) == CONST_DOUBLE &&
30040 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
30041 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
30043 long k[4];
30045 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30046 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
30047 else
30048 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
30050 if (TARGET_64BIT)
30052 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30053 fputs (DOUBLE_INT_ASM_OP, file);
30054 else
30055 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
30056 k[0] & 0xffffffff, k[1] & 0xffffffff,
30057 k[2] & 0xffffffff, k[3] & 0xffffffff);
30058 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
30059 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
30060 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
30061 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
30062 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
30063 return;
30065 else
30067 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30068 fputs ("\t.long ", file);
30069 else
30070 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
30071 k[0] & 0xffffffff, k[1] & 0xffffffff,
30072 k[2] & 0xffffffff, k[3] & 0xffffffff);
30073 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
30074 k[0] & 0xffffffff, k[1] & 0xffffffff,
30075 k[2] & 0xffffffff, k[3] & 0xffffffff);
30076 return;
30079 else if (GET_CODE (x) == CONST_DOUBLE &&
30080 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
30082 long k[2];
30084 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30085 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
30086 else
30087 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
30089 if (TARGET_64BIT)
30091 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30092 fputs (DOUBLE_INT_ASM_OP, file);
30093 else
30094 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30095 k[0] & 0xffffffff, k[1] & 0xffffffff);
30096 fprintf (file, "0x%lx%08lx\n",
30097 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
30098 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
30099 return;
30101 else
30103 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30104 fputs ("\t.long ", file);
30105 else
30106 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30107 k[0] & 0xffffffff, k[1] & 0xffffffff);
30108 fprintf (file, "0x%lx,0x%lx\n",
30109 k[0] & 0xffffffff, k[1] & 0xffffffff);
30110 return;
30113 else if (GET_CODE (x) == CONST_DOUBLE &&
30114 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
30116 long l;
30118 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30119 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
30120 else
30121 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
30123 if (TARGET_64BIT)
30125 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30126 fputs (DOUBLE_INT_ASM_OP, file);
30127 else
30128 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30129 if (WORDS_BIG_ENDIAN)
30130 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
30131 else
30132 fprintf (file, "0x%lx\n", l & 0xffffffff);
30133 return;
30135 else
30137 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30138 fputs ("\t.long ", file);
30139 else
30140 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30141 fprintf (file, "0x%lx\n", l & 0xffffffff);
30142 return;
30145 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
30147 unsigned HOST_WIDE_INT low;
30148 HOST_WIDE_INT high;
30150 low = INTVAL (x) & 0xffffffff;
30151 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30153 /* TOC entries are always Pmode-sized, so when big-endian
30154 smaller integer constants in the TOC need to be padded.
30155 (This is still a win over putting the constants in
30156 a separate constant pool, because then we'd have
30157 to have both a TOC entry _and_ the actual constant.)
30159 For a 32-bit target, CONST_INT values are loaded and shifted
30160 entirely within `low' and can be stored in one TOC entry. */
30162 /* It would be easy to make this work, but it doesn't now. */
30163 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30165 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30167 low |= high << 32;
30168 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30169 high = (HOST_WIDE_INT) low >> 32;
30170 low &= 0xffffffff;
30173 if (TARGET_64BIT)
30175 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30176 fputs (DOUBLE_INT_ASM_OP, file);
30177 else
30178 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30179 (long) high & 0xffffffff, (long) low & 0xffffffff);
30180 fprintf (file, "0x%lx%08lx\n",
30181 (long) high & 0xffffffff, (long) low & 0xffffffff);
30182 return;
30184 else
30186 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30188 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30189 fputs ("\t.long ", file);
30190 else
30191 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30192 (long) high & 0xffffffff, (long) low & 0xffffffff);
30193 fprintf (file, "0x%lx,0x%lx\n",
30194 (long) high & 0xffffffff, (long) low & 0xffffffff);
30196 else
30198 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30199 fputs ("\t.long ", file);
30200 else
30201 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30202 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30204 return;
30208 if (GET_CODE (x) == CONST)
30210 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30211 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
30213 base = XEXP (XEXP (x, 0), 0);
30214 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30217 switch (GET_CODE (base))
30219 case SYMBOL_REF:
30220 name = XSTR (base, 0);
30221 break;
30223 case LABEL_REF:
30224 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30225 CODE_LABEL_NUMBER (XEXP (base, 0)));
30226 break;
30228 case CODE_LABEL:
30229 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30230 break;
30232 default:
30233 gcc_unreachable ();
30236 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30237 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30238 else
30240 fputs ("\t.tc ", file);
30241 RS6000_OUTPUT_BASENAME (file, name);
30243 if (offset < 0)
30244 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30245 else if (offset)
30246 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30248 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30249 after other TOC symbols, reducing overflow of small TOC access
30250 to [TC] symbols. */
30251 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30252 ? "[TE]," : "[TC],", file);
30255 /* Currently C++ toc references to vtables can be emitted before it
30256 is decided whether the vtable is public or private. If this is
30257 the case, then the linker will eventually complain that there is
30258 a TOC reference to an unknown section. Thus, for vtables only,
30259 we emit the TOC reference to reference the symbol and not the
30260 section. */
30261 if (VTABLE_NAME_P (name))
30263 RS6000_OUTPUT_BASENAME (file, name);
30264 if (offset < 0)
30265 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30266 else if (offset > 0)
30267 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30269 else
30270 output_addr_const (file, x);
30272 #if HAVE_AS_TLS
30273 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30275 switch (SYMBOL_REF_TLS_MODEL (base))
30277 case 0:
30278 break;
30279 case TLS_MODEL_LOCAL_EXEC:
30280 fputs ("@le", file);
30281 break;
30282 case TLS_MODEL_INITIAL_EXEC:
30283 fputs ("@ie", file);
30284 break;
30285 /* Use global-dynamic for local-dynamic. */
30286 case TLS_MODEL_GLOBAL_DYNAMIC:
30287 case TLS_MODEL_LOCAL_DYNAMIC:
30288 putc ('\n', file);
30289 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30290 fputs ("\t.tc .", file);
30291 RS6000_OUTPUT_BASENAME (file, name);
30292 fputs ("[TC],", file);
30293 output_addr_const (file, x);
30294 fputs ("@m", file);
30295 break;
30296 default:
30297 gcc_unreachable ();
30300 #endif
30302 putc ('\n', file);
30305 /* Output an assembler pseudo-op to write an ASCII string of N characters
30306 starting at P to FILE.
30308 On the RS/6000, we have to do this using the .byte operation and
30309 write out special characters outside the quoted string.
30310 Also, the assembler is broken; very long strings are truncated,
30311 so we must artificially break them up early. */
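 /* Example (illustrative): for an input consisting of

 He said "hi"

 followed by a newline, the loop below emits

 .byte "He said ""hi"""
 .byte 10

 Printable characters are collected into quoted .byte directives with
 embedded quotes doubled; each non-printable byte is written as a
 separate decimal value. */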
30313 void
30314 output_ascii (FILE *file, const char *p, int n)
30316 char c;
30317 int i, count_string;
30318 const char *for_string = "\t.byte \"";
30319 const char *for_decimal = "\t.byte ";
30320 const char *to_close = NULL;
30322 count_string = 0;
30323 for (i = 0; i < n; i++)
30325 c = *p++;
30326 if (c >= ' ' && c < 0177)
30328 if (for_string)
30329 fputs (for_string, file);
30330 putc (c, file);
30332 /* Write two quotes to get one. */
30333 if (c == '"')
30335 putc (c, file);
30336 ++count_string;
30339 for_string = NULL;
30340 for_decimal = "\"\n\t.byte ";
30341 to_close = "\"\n";
30342 ++count_string;
30344 if (count_string >= 512)
30346 fputs (to_close, file);
30348 for_string = "\t.byte \"";
30349 for_decimal = "\t.byte ";
30350 to_close = NULL;
30351 count_string = 0;
30354 else
30356 if (for_decimal)
30357 fputs (for_decimal, file);
30358 fprintf (file, "%d", c);
30360 for_string = "\n\t.byte \"";
30361 for_decimal = ", ";
30362 to_close = "\n";
30363 count_string = 0;
30367 /* Now close the string if we have written one. Then end the line. */
30368 if (to_close)
30369 fputs (to_close, file);
30372 /* Generate a unique section name for FILENAME for a section type
30373 represented by SECTION_DESC. Output goes into BUF.
30375 SECTION_DESC can be any string, as long as it is different for each
30376 possible section type.
30378 We name the section in the same manner as xlc. The name begins with an
30379 underscore followed by the filename (after stripping any leading directory
30380 names) with the last period replaced by the string SECTION_DESC. If
30381 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30382 the name. */
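 /* Example (illustrative): for FILENAME "src/my-file.c" and SECTION_DESC
 "ro_", the code below produces "_myfilero_": leading directories are
 stripped, non-alphanumeric characters are dropped, and the final
 period is replaced by SECTION_DESC. */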
30384 void
30385 rs6000_gen_section_name (char **buf, const char *filename,
30386 const char *section_desc)
30388 const char *q, *after_last_slash, *last_period = 0;
30389 char *p;
30390 int len;
30392 after_last_slash = filename;
30393 for (q = filename; *q; q++)
30395 if (*q == '/')
30396 after_last_slash = q + 1;
30397 else if (*q == '.')
30398 last_period = q;
30401 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30402 *buf = (char *) xmalloc (len);
30404 p = *buf;
30405 *p++ = '_';
30407 for (q = after_last_slash; *q; q++)
30409 if (q == last_period)
30411 strcpy (p, section_desc);
30412 p += strlen (section_desc);
30413 break;
30416 else if (ISALNUM (*q))
30417 *p++ = *q;
30420 if (last_period == 0)
30421 strcpy (p, section_desc);
30422 else
30423 *p = '\0';
30426 /* Emit profile function. */
30428 void
30429 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30431 /* Non-standard profiling for kernels, which just saves LR then calls
30432 _mcount without worrying about arg saves. The idea is to change
30433 the function prologue as little as possible as it isn't easy to
30434 account for arg save/restore code added just for _mcount. */
30435 if (TARGET_PROFILE_KERNEL)
30436 return;
30438 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30440 #ifndef NO_PROFILE_COUNTERS
30441 # define NO_PROFILE_COUNTERS 0
30442 #endif
30443 if (NO_PROFILE_COUNTERS)
30444 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30445 LCT_NORMAL, VOIDmode);
30446 else
30448 char buf[30];
30449 const char *label_name;
30450 rtx fun;
30452 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30453 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30454 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30456 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30457 LCT_NORMAL, VOIDmode, fun, Pmode);
30460 else if (DEFAULT_ABI == ABI_DARWIN)
30462 const char *mcount_name = RS6000_MCOUNT;
30463 int caller_addr_regno = LR_REGNO;
30465 /* Be conservative and always set this, at least for now. */
30466 crtl->uses_pic_offset_table = 1;
30468 #if TARGET_MACHO
30469 /* For PIC code, set up a stub and collect the caller's address
30470 from r0, which is where the prologue puts it. */
30471 if (MACHOPIC_INDIRECT
30472 && crtl->uses_pic_offset_table)
30473 caller_addr_regno = 0;
30474 #endif
30475 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30476 LCT_NORMAL, VOIDmode,
30477 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30481 /* Write function profiler code. */
30483 void
30484 output_function_profiler (FILE *file, int labelno)
30486 char buf[100];
30488 switch (DEFAULT_ABI)
30490 default:
30491 gcc_unreachable ();
30493 case ABI_V4:
30494 if (!TARGET_32BIT)
30496 warning (0, "no profiling of 64-bit code for this ABI");
30497 return;
30499 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30500 fprintf (file, "\tmflr %s\n", reg_names[0]);
30501 if (NO_PROFILE_COUNTERS)
30503 asm_fprintf (file, "\tstw %s,4(%s)\n",
30504 reg_names[0], reg_names[1]);
30506 else if (TARGET_SECURE_PLT && flag_pic)
30508 if (TARGET_LINK_STACK)
30510 char name[32];
30511 get_ppc476_thunk_name (name);
30512 asm_fprintf (file, "\tbl %s\n", name);
30514 else
30515 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30516 asm_fprintf (file, "\tstw %s,4(%s)\n",
30517 reg_names[0], reg_names[1]);
30518 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30519 asm_fprintf (file, "\taddis %s,%s,",
30520 reg_names[12], reg_names[12]);
30521 assemble_name (file, buf);
30522 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30523 assemble_name (file, buf);
30524 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30526 else if (flag_pic == 1)
30528 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30529 asm_fprintf (file, "\tstw %s,4(%s)\n",
30530 reg_names[0], reg_names[1]);
30531 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30532 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30533 assemble_name (file, buf);
30534 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30536 else if (flag_pic > 1)
30538 asm_fprintf (file, "\tstw %s,4(%s)\n",
30539 reg_names[0], reg_names[1]);
30540 /* Now, we need to get the address of the label. */
30541 if (TARGET_LINK_STACK)
30543 char name[32];
30544 get_ppc476_thunk_name (name);
30545 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30546 assemble_name (file, buf);
30547 fputs ("-.\n1:", file);
30548 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30549 asm_fprintf (file, "\taddi %s,%s,4\n",
30550 reg_names[11], reg_names[11]);
30552 else
30554 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30555 assemble_name (file, buf);
30556 fputs ("-.\n1:", file);
30557 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30559 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30560 reg_names[0], reg_names[11]);
30561 asm_fprintf (file, "\tadd %s,%s,%s\n",
30562 reg_names[0], reg_names[0], reg_names[11]);
30564 else
30566 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30567 assemble_name (file, buf);
30568 fputs ("@ha\n", file);
30569 asm_fprintf (file, "\tstw %s,4(%s)\n",
30570 reg_names[0], reg_names[1]);
30571 asm_fprintf (file, "\tla %s,", reg_names[0]);
30572 assemble_name (file, buf);
30573 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30576 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30577 fprintf (file, "\tbl %s%s\n",
30578 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30579 break;
30581 case ABI_AIX:
30582 case ABI_ELFv2:
30583 case ABI_DARWIN:
30584 /* Don't do anything, done in output_profile_hook (). */
30585 break;
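/* Illustrative example (an editor's sketch, not compiler output copied
   from a build): for the final non-PIC ABI_V4 case above, with profile
   counters enabled and a hypothetical internal label .LP0, the emitted
   sequence is roughly:

	mflr 0
	lis 12,.LP0@ha
	stw 0,4(1)
	la 0,.LP0@l(12)
	bl _mcount

   i.e. the saved link register is stored at 4(r1), the counter label's
   address is formed in r0 via r12, and RS6000_MCOUNT (assumed here to
   be "_mcount", as on SVR4/Linux targets) is called.  */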
30591 /* The following variable holds the last issued insn. */
30593 static rtx_insn *last_scheduled_insn;
30595 /* The following variable helps to balance issuing of load and
30596 store instructions. */
30598 static int load_store_pendulum;
30600 /* The following variable helps pair divide insns during scheduling. */
30601 static int divide_cnt;
30602 /* The following variable helps pair and alternate vector and vector load
30603 insns during scheduling. */
30604 static int vec_pairing;
30607 /* Power4 load update and store update instructions are cracked into a
30608 load or store and an integer insn which are executed in the same cycle.
30609 Branches have their own dispatch slot which does not count against the
30610 GCC issue rate, but it changes the program flow so there are no other
30611 instructions to issue in this cycle. */
30613 static int
30614 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30616 last_scheduled_insn = insn;
30617 if (GET_CODE (PATTERN (insn)) == USE
30618 || GET_CODE (PATTERN (insn)) == CLOBBER)
30620 cached_can_issue_more = more;
30621 return cached_can_issue_more;
30624 if (insn_terminates_group_p (insn, current_group))
30626 cached_can_issue_more = 0;
30627 return cached_can_issue_more;
30630 /* If the insn has no reservation but we reach here anyway, use the default. */
30631 if (recog_memoized (insn) < 0)
30632 return more;
30634 if (rs6000_sched_groups)
30636 if (is_microcoded_insn (insn))
30637 cached_can_issue_more = 0;
30638 else if (is_cracked_insn (insn))
30639 cached_can_issue_more = more > 2 ? more - 2 : 0;
30640 else
30641 cached_can_issue_more = more - 1;
30643 return cached_can_issue_more;
30646 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30647 return 0;
30649 cached_can_issue_more = more - 1;
30650 return cached_can_issue_more;
30653 static int
30654 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30656 int r = rs6000_variable_issue_1 (insn, more);
30657 if (verbose)
30658 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30659 return r;
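/* A minimal, self-contained model (an editor's sketch, not part of GCC)
   of the slot bookkeeping in rs6000_variable_issue_1 for the
   rs6000_sched_groups case: a microcoded insn consumes the whole
   dispatch group, a cracked insn two issue slots, anything else one.  */
#if 0
static int
issue_slots_left_model (int more, int microcoded, int cracked)
{
  if (microcoded)
    return 0;				/* fills the remaining group */
  if (cracked)
    return more > 2 ? more - 2 : 0;	/* occupies two issue slots */
  return more - 1;			/* occupies one issue slot */
}
#endif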
30662 /* Adjust the cost of a scheduling dependency. Return the new cost of
30663 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30665 static int
30666 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30667 unsigned int)
30669 enum attr_type attr_type;
30671 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30672 return cost;
30674 switch (dep_type)
30676 case REG_DEP_TRUE:
30678 /* Data dependency; DEP_INSN writes a register that INSN reads
30679 some cycles later. */
30681 /* Separate a load from a narrower, dependent store. */
30682 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30683 && GET_CODE (PATTERN (insn)) == SET
30684 && GET_CODE (PATTERN (dep_insn)) == SET
30685 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30686 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30687 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30688 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30689 return cost + 14;
30691 attr_type = get_attr_type (insn);
30693 switch (attr_type)
30695 case TYPE_JMPREG:
30696 /* Tell the first scheduling pass about the latency between
30697 a mtctr and bctr (and mtlr and br/blr). The first
30698 scheduling pass will not know about this latency since
30699 the mtctr instruction, which has the latency associated
30700 to it, will be generated by reload. */
30701 return 4;
30702 case TYPE_BRANCH:
30703 /* Leave some extra cycles between a compare and its
30704 dependent branch, to inhibit expensive mispredicts. */
30705 if ((rs6000_tune == PROCESSOR_PPC603
30706 || rs6000_tune == PROCESSOR_PPC604
30707 || rs6000_tune == PROCESSOR_PPC604e
30708 || rs6000_tune == PROCESSOR_PPC620
30709 || rs6000_tune == PROCESSOR_PPC630
30710 || rs6000_tune == PROCESSOR_PPC750
30711 || rs6000_tune == PROCESSOR_PPC7400
30712 || rs6000_tune == PROCESSOR_PPC7450
30713 || rs6000_tune == PROCESSOR_PPCE5500
30714 || rs6000_tune == PROCESSOR_PPCE6500
30715 || rs6000_tune == PROCESSOR_POWER4
30716 || rs6000_tune == PROCESSOR_POWER5
30717 || rs6000_tune == PROCESSOR_POWER7
30718 || rs6000_tune == PROCESSOR_POWER8
30719 || rs6000_tune == PROCESSOR_POWER9
30720 || rs6000_tune == PROCESSOR_CELL)
30721 && recog_memoized (dep_insn)
30722 && (INSN_CODE (dep_insn) >= 0))
30724 switch (get_attr_type (dep_insn))
30726 case TYPE_CMP:
30727 case TYPE_FPCOMPARE:
30728 case TYPE_CR_LOGICAL:
30729 return cost + 2;
30730 case TYPE_EXTS:
30731 case TYPE_MUL:
30732 if (get_attr_dot (dep_insn) == DOT_YES)
30733 return cost + 2;
30734 else
30735 break;
30736 case TYPE_SHIFT:
30737 if (get_attr_dot (dep_insn) == DOT_YES
30738 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30739 return cost + 2;
30740 else
30741 break;
30742 default:
30743 break;
30745 break;
30747 case TYPE_STORE:
30748 case TYPE_FPSTORE:
30749 if ((rs6000_tune == PROCESSOR_POWER6)
30750 && recog_memoized (dep_insn)
30751 && (INSN_CODE (dep_insn) >= 0))
30754 if (GET_CODE (PATTERN (insn)) != SET)
30755 /* If this happens, we have to extend this to schedule
30756 optimally. Return default for now. */
30757 return cost;
30759 /* Adjust the cost for the case where the value written
30760 by a fixed point operation is used as the address
30761 gen value on a store. */
30762 switch (get_attr_type (dep_insn))
30764 case TYPE_LOAD:
30765 case TYPE_CNTLZ:
30767 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30768 return get_attr_sign_extend (dep_insn)
30769 == SIGN_EXTEND_YES ? 6 : 4;
30770 break;
30772 case TYPE_SHIFT:
30774 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30775 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30776 6 : 3;
30777 break;
30779 case TYPE_INTEGER:
30780 case TYPE_ADD:
30781 case TYPE_LOGICAL:
30782 case TYPE_EXTS:
30783 case TYPE_INSERT:
30785 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30786 return 3;
30787 break;
30789 case TYPE_STORE:
30790 case TYPE_FPLOAD:
30791 case TYPE_FPSTORE:
30793 if (get_attr_update (dep_insn) == UPDATE_YES
30794 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30795 return 3;
30796 break;
30798 case TYPE_MUL:
30800 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30801 return 17;
30802 break;
30804 case TYPE_DIV:
30806 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30807 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30808 break;
30810 default:
30811 break;
30814 break;
30816 case TYPE_LOAD:
30817 if ((rs6000_tune == PROCESSOR_POWER6)
30818 && recog_memoized (dep_insn)
30819 && (INSN_CODE (dep_insn) >= 0))
30822 /* Adjust the cost for the case where the value written
30823 by a fixed point instruction is used within the address
30824 gen portion of a subsequent load(u)(x). */
30825 switch (get_attr_type (dep_insn))
30827 case TYPE_LOAD:
30828 case TYPE_CNTLZ:
30830 if (set_to_load_agen (dep_insn, insn))
30831 return get_attr_sign_extend (dep_insn)
30832 == SIGN_EXTEND_YES ? 6 : 4;
30833 break;
30835 case TYPE_SHIFT:
30837 if (set_to_load_agen (dep_insn, insn))
30838 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30839 6 : 3;
30840 break;
30842 case TYPE_INTEGER:
30843 case TYPE_ADD:
30844 case TYPE_LOGICAL:
30845 case TYPE_EXTS:
30846 case TYPE_INSERT:
30848 if (set_to_load_agen (dep_insn, insn))
30849 return 3;
30850 break;
30852 case TYPE_STORE:
30853 case TYPE_FPLOAD:
30854 case TYPE_FPSTORE:
30856 if (get_attr_update (dep_insn) == UPDATE_YES
30857 && set_to_load_agen (dep_insn, insn))
30858 return 3;
30859 break;
30861 case TYPE_MUL:
30863 if (set_to_load_agen (dep_insn, insn))
30864 return 17;
30865 break;
30867 case TYPE_DIV:
30869 if (set_to_load_agen (dep_insn, insn))
30870 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30871 break;
30873 default:
30874 break;
30877 break;
30879 case TYPE_FPLOAD:
30880 if ((rs6000_tune == PROCESSOR_POWER6)
30881 && get_attr_update (insn) == UPDATE_NO
30882 && recog_memoized (dep_insn)
30883 && (INSN_CODE (dep_insn) >= 0)
30884 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30885 return 2;
30887 default:
30888 break;
30891 /* Fall out to return default cost. */
30893 break;
30895 case REG_DEP_OUTPUT:
30896 /* Output dependency; DEP_INSN writes a register that INSN writes some
30897 cycles later. */
30898 if ((rs6000_tune == PROCESSOR_POWER6)
30899 && recog_memoized (dep_insn)
30900 && (INSN_CODE (dep_insn) >= 0))
30902 attr_type = get_attr_type (insn);
30904 switch (attr_type)
30906 case TYPE_FP:
30907 case TYPE_FPSIMPLE:
30908 if (get_attr_type (dep_insn) == TYPE_FP
30909 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30910 return 1;
30911 break;
30912 case TYPE_FPLOAD:
30913 if (get_attr_update (insn) == UPDATE_NO
30914 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30915 return 2;
30916 break;
30917 default:
30918 break;
30921 /* Fall through, no cost for output dependency. */
30922 /* FALLTHRU */
30924 case REG_DEP_ANTI:
30925 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30926 cycles later. */
30927 return 0;
30929 default:
30930 gcc_unreachable ();
30933 return cost;
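/* Sketch (not part of GCC): the "separate a load from a narrower,
   dependent store" adjustment above reduces to this test on access
   sizes; the 14-cycle pad presumably reflects that a wider load cannot
   be satisfied by store forwarding and must wait on the store queue.  */
#if 0
static int
narrow_store_penalty_model (int cost, int load_bytes, int store_bytes)
{
  return load_bytes > store_bytes ? cost + 14 : cost;
}
#endif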
30936 /* Debug version of rs6000_adjust_cost. */
30938 static int
30939 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30940 int cost, unsigned int dw)
30942 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30944 if (ret != cost)
30946 const char *dep;
30948 switch (dep_type)
30950 default: dep = "unknown dependency"; break;
30951 case REG_DEP_TRUE: dep = "data dependency"; break;
30952 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30953 case REG_DEP_ANTI: dep = "anti dependency"; break;
30956 fprintf (stderr,
30957 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30958 "%s, insn:\n", ret, cost, dep);
30960 debug_rtx (insn);
30963 return ret;
30966 /* Return true if INSN is microcoded, false otherwise. */
30969 static bool
30970 is_microcoded_insn (rtx_insn *insn)
30972 if (!insn || !NONDEBUG_INSN_P (insn)
30973 || GET_CODE (PATTERN (insn)) == USE
30974 || GET_CODE (PATTERN (insn)) == CLOBBER)
30975 return false;
30977 if (rs6000_tune == PROCESSOR_CELL)
30978 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30980 if (rs6000_sched_groups
30981 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30983 enum attr_type type = get_attr_type (insn);
30984 if ((type == TYPE_LOAD
30985 && get_attr_update (insn) == UPDATE_YES
30986 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30987 || ((type == TYPE_LOAD || type == TYPE_STORE)
30988 && get_attr_update (insn) == UPDATE_YES
30989 && get_attr_indexed (insn) == INDEXED_YES)
30990 || type == TYPE_MFCR)
30991 return true;
30994 return false;
30997 /* The function returns true if INSN is cracked into 2 instructions
30998 by the processor (and therefore occupies 2 issue slots). */
31000 static bool
31001 is_cracked_insn (rtx_insn *insn)
31003 if (!insn || !NONDEBUG_INSN_P (insn)
31004 || GET_CODE (PATTERN (insn)) == USE
31005 || GET_CODE (PATTERN (insn)) == CLOBBER)
31006 return false;
31008 if (rs6000_sched_groups
31009 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
31011 enum attr_type type = get_attr_type (insn);
31012 if ((type == TYPE_LOAD
31013 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31014 && get_attr_update (insn) == UPDATE_NO)
31015 || (type == TYPE_LOAD
31016 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
31017 && get_attr_update (insn) == UPDATE_YES
31018 && get_attr_indexed (insn) == INDEXED_NO)
31019 || (type == TYPE_STORE
31020 && get_attr_update (insn) == UPDATE_YES
31021 && get_attr_indexed (insn) == INDEXED_NO)
31022 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
31023 && get_attr_update (insn) == UPDATE_YES)
31024 || (type == TYPE_CR_LOGICAL
31025 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
31026 || (type == TYPE_EXTS
31027 && get_attr_dot (insn) == DOT_YES)
31028 || (type == TYPE_SHIFT
31029 && get_attr_dot (insn) == DOT_YES
31030 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
31031 || (type == TYPE_MUL
31032 && get_attr_dot (insn) == DOT_YES)
31033 || type == TYPE_DIV
31034 || (type == TYPE_INSERT
31035 && get_attr_size (insn) == SIZE_32))
31036 return true;
31039 return false;
31042 /* The function returns true if INSN can be issued only from
31043 the branch slot. */
31045 static bool
31046 is_branch_slot_insn (rtx_insn *insn)
31048 if (!insn || !NONDEBUG_INSN_P (insn)
31049 || GET_CODE (PATTERN (insn)) == USE
31050 || GET_CODE (PATTERN (insn)) == CLOBBER)
31051 return false;
31053 if (rs6000_sched_groups)
31055 enum attr_type type = get_attr_type (insn);
31056 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
31057 return true;
31058 return false;
31061 return false;
31064 /* Return true if OUT_INSN sets a value that is used in the
31065 address generation computation of IN_INSN. */
31066 static bool
31067 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
31069 rtx out_set, in_set;
31071 /* For performance reasons, only handle the simple case where
31072 both insns are a single_set. */
31073 out_set = single_set (out_insn);
31074 if (out_set)
31076 in_set = single_set (in_insn);
31077 if (in_set)
31078 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
31081 return false;
31084 /* Try to determine base/offset/size parts of the given MEM.
31085 Return true if successful, false if the values couldn't all
31086 be determined.
31088 This function only looks for REG or REG+CONST address forms.
31089 REG+REG address form will return false. */
31091 static bool
31092 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
31093 HOST_WIDE_INT *size)
31095 rtx addr_rtx;
31096 if (MEM_SIZE_KNOWN_P (mem))
31097 *size = MEM_SIZE (mem);
31098 else
31099 return false;
31101 addr_rtx = (XEXP (mem, 0));
31102 if (GET_CODE (addr_rtx) == PRE_MODIFY)
31103 addr_rtx = XEXP (addr_rtx, 1);
31105 *offset = 0;
31106 while (GET_CODE (addr_rtx) == PLUS
31107 && CONST_INT_P (XEXP (addr_rtx, 1)))
31109 *offset += INTVAL (XEXP (addr_rtx, 1));
31110 addr_rtx = XEXP (addr_rtx, 0);
31112 if (!REG_P (addr_rtx))
31113 return false;
31115 *base = addr_rtx;
31116 return true;
31119 /* Return true if the target storage location of MEM1 is adjacent
31120 to the target storage location of MEM2. */
31123 static bool
31124 adjacent_mem_locations (rtx mem1, rtx mem2)
31126 rtx reg1, reg2;
31127 HOST_WIDE_INT off1, size1, off2, size2;
31129 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31130 && get_memref_parts (mem2, &reg2, &off2, &size2))
31131 return ((REGNO (reg1) == REGNO (reg2))
31132 && ((off1 + size1 == off2)
31133 || (off2 + size2 == off1)));
31135 return false;
31138 /* This function returns true if it can be determined that the two MEM
31139 locations overlap by at least 1 byte based on base reg/offset/size. */
31141 static bool
31142 mem_locations_overlap (rtx mem1, rtx mem2)
31144 rtx reg1, reg2;
31145 HOST_WIDE_INT off1, size1, off2, size2;
31147 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31148 && get_memref_parts (mem2, &reg2, &off2, &size2))
31149 return ((REGNO (reg1) == REGNO (reg2))
31150 && (((off1 <= off2) && (off1 + size1 > off2))
31151 || ((off2 <= off1) && (off2 + size2 > off1))));
31153 return false;
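/* Sketch (not part of GCC): the adjacency and overlap arithmetic above
   on plain integers, assuming both accesses share the same base
   register.  For example, off1 = 0, size1 = 4 with off2 = 4, size2 = 4
   is adjacent, while off2 = 2 would overlap.  */
#if 0
static bool
adjacent_model (long off1, long size1, long off2, long size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static bool
overlap_model (long off1, long size1, long off2, long size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}
#endif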
31156 /* Adjust the integer scheduling priority INSN_PRIORITY (INSN).
31157 Increase the priority to execute INSN earlier; reduce it to
31158 execute INSN later. */
31162 static int
31163 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
31165 rtx load_mem, str_mem;
31166 /* On machines (like the 750) which have asymmetric integer units,
31167 where one integer unit can do multiply and divides and the other
31168 can't, reduce the priority of multiply/divide so it is scheduled
31169 before other integer operations. */
31171 #if 0
31172 if (! INSN_P (insn))
31173 return priority;
31175 if (GET_CODE (PATTERN (insn)) == USE)
31176 return priority;
31178 switch (rs6000_tune) {
31179 case PROCESSOR_PPC750:
31180 switch (get_attr_type (insn))
31182 default:
31183 break;
31185 case TYPE_MUL:
31186 case TYPE_DIV:
31187 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31188 priority, priority);
31189 if (priority >= 0 && priority < 0x01000000)
31190 priority >>= 3;
31191 break;
31194 #endif
31196 if (insn_must_be_first_in_group (insn)
31197 && reload_completed
31198 && current_sched_info->sched_max_insns_priority
31199 && rs6000_sched_restricted_insns_priority)
31202 /* Prioritize insns that can be dispatched only in the first
31203 dispatch slot. */
31204 if (rs6000_sched_restricted_insns_priority == 1)
31205 /* Attach highest priority to insn. This means that in
31206 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31207 precede 'priority' (critical path) considerations. */
31208 return current_sched_info->sched_max_insns_priority;
31209 else if (rs6000_sched_restricted_insns_priority == 2)
31210 /* Increase priority of insn by a minimal amount. This means that in
31211 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31212 considerations precede dispatch-slot restriction considerations. */
31213 return (priority + 1);
31216 if (rs6000_tune == PROCESSOR_POWER6
31217 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31218 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31219 /* Attach highest priority to insn if the scheduler has just issued two
31220 stores and this instruction is a load, or two loads and this instruction
31221 is a store. Power6 wants loads and stores scheduled alternately
31222 when possible. */
31223 return current_sched_info->sched_max_insns_priority;
31225 return priority;
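/* Sketch (not part of GCC): the two restricted-insn priority schemes
   above (selected by -mprioritize-restricted-insns=N), over plain
   integers.  Scheme 1 makes the dispatch-slot restriction dominate the
   critical path; scheme 2 only breaks ties in its favor.  */
#if 0
static int
restricted_priority_model (int priority, int scheme, int max_priority)
{
  if (scheme == 1)
    return max_priority;	/* restriction precedes critical path */
  if (scheme == 2)
    return priority + 1;	/* critical path still precedes it */
  return priority;
}
#endif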
31228 /* Return true if the instruction is nonpipelined on the Cell. */
31229 static bool
31230 is_nonpipeline_insn (rtx_insn *insn)
31232 enum attr_type type;
31233 if (!insn || !NONDEBUG_INSN_P (insn)
31234 || GET_CODE (PATTERN (insn)) == USE
31235 || GET_CODE (PATTERN (insn)) == CLOBBER)
31236 return false;
31238 type = get_attr_type (insn);
31239 if (type == TYPE_MUL
31240 || type == TYPE_DIV
31241 || type == TYPE_SDIV
31242 || type == TYPE_DDIV
31243 || type == TYPE_SSQRT
31244 || type == TYPE_DSQRT
31245 || type == TYPE_MFCR
31246 || type == TYPE_MFCRF
31247 || type == TYPE_MFJMPR)
31249 return true;
31251 return false;
31255 /* Return how many instructions the machine can issue per cycle. */
31257 static int
31258 rs6000_issue_rate (void)
31260 /* Unless scheduling for register pressure, use issue rate of 1 for
31261 first scheduling pass to decrease degradation. */
31262 if (!reload_completed && !flag_sched_pressure)
31263 return 1;
31265 switch (rs6000_tune) {
31266 case PROCESSOR_RS64A:
31267 case PROCESSOR_PPC601: /* ? */
31268 case PROCESSOR_PPC7450:
31269 return 3;
31270 case PROCESSOR_PPC440:
31271 case PROCESSOR_PPC603:
31272 case PROCESSOR_PPC750:
31273 case PROCESSOR_PPC7400:
31274 case PROCESSOR_PPC8540:
31275 case PROCESSOR_PPC8548:
31276 case PROCESSOR_CELL:
31277 case PROCESSOR_PPCE300C2:
31278 case PROCESSOR_PPCE300C3:
31279 case PROCESSOR_PPCE500MC:
31280 case PROCESSOR_PPCE500MC64:
31281 case PROCESSOR_PPCE5500:
31282 case PROCESSOR_PPCE6500:
31283 case PROCESSOR_TITAN:
31284 return 2;
31285 case PROCESSOR_PPC476:
31286 case PROCESSOR_PPC604:
31287 case PROCESSOR_PPC604e:
31288 case PROCESSOR_PPC620:
31289 case PROCESSOR_PPC630:
31290 return 4;
31291 case PROCESSOR_POWER4:
31292 case PROCESSOR_POWER5:
31293 case PROCESSOR_POWER6:
31294 case PROCESSOR_POWER7:
31295 return 5;
31296 case PROCESSOR_POWER8:
31297 return 7;
31298 case PROCESSOR_POWER9:
31299 return 6;
31300 default:
31301 return 1;
31305 /* Return how many instructions to look ahead for better insn
31306 scheduling. */
31308 static int
31309 rs6000_use_sched_lookahead (void)
31311 switch (rs6000_tune)
31313 case PROCESSOR_PPC8540:
31314 case PROCESSOR_PPC8548:
31315 return 4;
31317 case PROCESSOR_CELL:
31318 return (reload_completed ? 8 : 0);
31320 default:
31321 return 0;
31325 /* We are choosing insn from the ready queue. Return zero if INSN can be
31326 chosen. */
31327 static int
31328 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31330 if (ready_index == 0)
31331 return 0;
31333 if (rs6000_tune != PROCESSOR_CELL)
31334 return 0;
31336 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31338 if (!reload_completed
31339 || is_nonpipeline_insn (insn)
31340 || is_microcoded_insn (insn))
31341 return 1;
31343 return 0;
31346 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31347 and return true. */
31349 static bool
31350 find_mem_ref (rtx pat, rtx *mem_ref)
31352 const char * fmt;
31353 int i, j;
31355 /* stack_tie does not produce any real memory traffic. */
31356 if (tie_operand (pat, VOIDmode))
31357 return false;
31359 if (GET_CODE (pat) == MEM)
31361 *mem_ref = pat;
31362 return true;
31365 /* Recursively process the pattern. */
31366 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31368 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31370 if (fmt[i] == 'e')
31372 if (find_mem_ref (XEXP (pat, i), mem_ref))
31373 return true;
31375 else if (fmt[i] == 'E')
31376 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31378 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31379 return true;
31383 return false;
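/* Sketch (not part of GCC): the same depth-first search expressed over
   a toy tree, ignoring the 'e'/'E' RTX format details and the reverse
   iteration order: return the first node satisfying PRED.  */
#if 0
struct toy_node
{
  int tag;
  int n_kids;
  struct toy_node **kids;
};

static struct toy_node *
find_first_model (struct toy_node *n, int (*pred) (struct toy_node *))
{
  if (pred (n))
    return n;
  for (int i = 0; i < n->n_kids; i++)
    {
      struct toy_node *hit = find_first_model (n->kids[i], pred);
      if (hit)
	return hit;
    }
  return NULL;
}
#endif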
31386 /* Determine if PAT is a PATTERN of a load insn. */
31388 static bool
31389 is_load_insn1 (rtx pat, rtx *load_mem)
31391 if (!pat)
31392 return false;
31394 if (GET_CODE (pat) == SET)
31395 return find_mem_ref (SET_SRC (pat), load_mem);
31397 if (GET_CODE (pat) == PARALLEL)
31399 int i;
31401 for (i = 0; i < XVECLEN (pat, 0); i++)
31402 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31403 return true;
31406 return false;
31409 /* Determine if INSN loads from memory. */
31411 static bool
31412 is_load_insn (rtx insn, rtx *load_mem)
31414 if (!insn || !INSN_P (insn))
31415 return false;
31417 if (CALL_P (insn))
31418 return false;
31420 return is_load_insn1 (PATTERN (insn), load_mem);
31423 /* Determine if PAT is a PATTERN of a store insn. */
31425 static bool
31426 is_store_insn1 (rtx pat, rtx *str_mem)
31428 if (!pat)
31429 return false;
31431 if (GET_CODE (pat) == SET)
31432 return find_mem_ref (SET_DEST (pat), str_mem);
31434 if (GET_CODE (pat) == PARALLEL)
31436 int i;
31438 for (i = 0; i < XVECLEN (pat, 0); i++)
31439 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31440 return true;
31443 return false;
31446 /* Determine if INSN stores to memory. */
31448 static bool
31449 is_store_insn (rtx insn, rtx *str_mem)
31451 if (!insn || !INSN_P (insn))
31452 return false;
31454 return is_store_insn1 (PATTERN (insn), str_mem);
31457 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31459 static bool
31460 is_power9_pairable_vec_type (enum attr_type type)
31462 switch (type)
31464 case TYPE_VECSIMPLE:
31465 case TYPE_VECCOMPLEX:
31466 case TYPE_VECDIV:
31467 case TYPE_VECCMP:
31468 case TYPE_VECPERM:
31469 case TYPE_VECFLOAT:
31470 case TYPE_VECFDIV:
31471 case TYPE_VECDOUBLE:
31472 return true;
31473 default:
31474 break;
31476 return false;
31479 /* Returns whether the dependence between INSN and NEXT is considered
31480 costly by the given target. */
31482 static bool
31483 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31485 rtx insn;
31486 rtx next;
31487 rtx load_mem, str_mem;
31489 /* If the flag is not enabled - no dependence is considered costly;
31490 allow all dependent insns in the same group.
31491 This is the most aggressive option. */
31492 if (rs6000_sched_costly_dep == no_dep_costly)
31493 return false;
31495 /* If the flag is set to 1 - a dependence is always considered costly;
31496 do not allow dependent instructions in the same group.
31497 This is the most conservative option. */
31498 if (rs6000_sched_costly_dep == all_deps_costly)
31499 return true;
31501 insn = DEP_PRO (dep);
31502 next = DEP_CON (dep);
31504 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31505 && is_load_insn (next, &load_mem)
31506 && is_store_insn (insn, &str_mem))
31507 /* Prevent load after store in the same group. */
31508 return true;
31510 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31511 && is_load_insn (next, &load_mem)
31512 && is_store_insn (insn, &str_mem)
31513 && DEP_TYPE (dep) == REG_DEP_TRUE
31514 && mem_locations_overlap(str_mem, load_mem))
31515 /* Prevent load after store in the same group if it is a true
31516 dependence. */
31517 return true;
31519 /* The flag is set to X; dependences with latency >= X are considered costly,
31520 and will not be scheduled in the same group. */
31521 if (rs6000_sched_costly_dep <= max_dep_latency
31522 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31523 return true;
31525 return false;
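/* Worked example (editor's sketch): under the latency scheme, a
   dependence is costly when its remaining latency reaches the
   threshold N from -msched-costly-dep=N.  With cost 4 and distance 1,
   the remaining latency is 3, so N = 3 keeps the two insns in separate
   groups while N = 4 does not.  */
#if 0
static bool
costly_by_latency_model (int cost, int distance, int n)
{
  return (cost - distance) >= n;
}
#endif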
31528 /* Return the next insn after INSN that is found before TAIL is reached,
31529 skipping any "non-active" insns - insns that will not actually occupy
31530 an issue slot. Return NULL_RTX if such an insn is not found. */
31532 static rtx_insn *
31533 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31535 if (insn == NULL_RTX || insn == tail)
31536 return NULL;
31538 while (1)
31540 insn = NEXT_INSN (insn);
31541 if (insn == NULL_RTX || insn == tail)
31542 return NULL;
31544 if (CALL_P (insn)
31545 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31546 || (NONJUMP_INSN_P (insn)
31547 && GET_CODE (PATTERN (insn)) != USE
31548 && GET_CODE (PATTERN (insn)) != CLOBBER
31549 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31550 break;
31552 return insn;
31555 /* Do Power9-specific sched_reorder2 reordering of the ready list. */
31557 static int
31558 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31560 int pos;
31561 int i;
31562 rtx_insn *tmp;
31563 enum attr_type type, type2;
31565 type = get_attr_type (last_scheduled_insn);
31567 /* Try to issue fixed point divides back-to-back in pairs so they will be
31568 routed to separate execution units and execute in parallel. */
31569 if (type == TYPE_DIV && divide_cnt == 0)
31571 /* First divide has been scheduled. */
31572 divide_cnt = 1;
31574 /* Scan the ready list looking for another divide, if found move it
31575 to the end of the list so it is chosen next. */
31576 pos = lastpos;
31577 while (pos >= 0)
31579 if (recog_memoized (ready[pos]) >= 0
31580 && get_attr_type (ready[pos]) == TYPE_DIV)
31582 tmp = ready[pos];
31583 for (i = pos; i < lastpos; i++)
31584 ready[i] = ready[i + 1];
31585 ready[lastpos] = tmp;
31586 break;
31588 pos--;
31591 else
31593 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31594 divide_cnt = 0;
31596 /* The best dispatch throughput for vector and vector load insns can be
31597 achieved by interleaving a vector and vector load such that they'll
31598 dispatch to the same superslice. If this pairing cannot be achieved
31599 then it is best to pair vector insns together and vector load insns
31600 together.
31602 To aid in this pairing, vec_pairing maintains the current state with
31603 the following values:
31605 0 : Initial state, no vecload/vector pairing has been started.
31607 1 : A vecload or vector insn has been issued and a candidate for
31608 pairing has been found and moved to the end of the ready
31609 list. */
31610 if (type == TYPE_VECLOAD)
31612 /* Issued a vecload. */
31613 if (vec_pairing == 0)
31615 int vecload_pos = -1;
31616 /* We issued a single vecload, look for a vector insn to pair it
31617 with. If one isn't found, try to pair another vecload. */
31618 pos = lastpos;
31619 while (pos >= 0)
31621 if (recog_memoized (ready[pos]) >= 0)
31623 type2 = get_attr_type (ready[pos]);
31624 if (is_power9_pairable_vec_type (type2))
31626 /* Found a vector insn to pair with, move it to the
31627 end of the ready list so it is scheduled next. */
31628 tmp = ready[pos];
31629 for (i = pos; i < lastpos; i++)
31630 ready[i] = ready[i + 1];
31631 ready[lastpos] = tmp;
31632 vec_pairing = 1;
31633 return cached_can_issue_more;
31635 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31636 /* Remember position of first vecload seen. */
31637 vecload_pos = pos;
31639 pos--;
31641 if (vecload_pos >= 0)
31643 /* Didn't find a vector to pair with but did find a vecload,
31644 move it to the end of the ready list. */
31645 tmp = ready[vecload_pos];
31646 for (i = vecload_pos; i < lastpos; i++)
31647 ready[i] = ready[i + 1];
31648 ready[lastpos] = tmp;
31649 vec_pairing = 1;
31650 return cached_can_issue_more;
31654 else if (is_power9_pairable_vec_type (type))
31656 /* Issued a vector operation. */
31657 if (vec_pairing == 0)
31659 int vec_pos = -1;
31660 /* We issued a single vector insn, look for a vecload to pair it
31661 with. If one isn't found, try to pair another vector. */
31662 pos = lastpos;
31663 while (pos >= 0)
31665 if (recog_memoized (ready[pos]) >= 0)
31667 type2 = get_attr_type (ready[pos]);
31668 if (type2 == TYPE_VECLOAD)
31670 /* Found a vecload insn to pair with, move it to the
31671 end of the ready list so it is scheduled next. */
31672 tmp = ready[pos];
31673 for (i = pos; i < lastpos; i++)
31674 ready[i] = ready[i + 1];
31675 ready[lastpos] = tmp;
31676 vec_pairing = 1;
31677 return cached_can_issue_more;
31679 else if (is_power9_pairable_vec_type (type2)
31680 && vec_pos == -1)
31681 /* Remember position of first vector insn seen. */
31682 vec_pos = pos;
31684 pos--;
31686 if (vec_pos >= 0)
31688 /* Didn't find a vecload to pair with but did find a vector
31689 insn, move it to the end of the ready list. */
31690 tmp = ready[vec_pos];
31691 for (i = vec_pos; i < lastpos; i++)
31692 ready[i] = ready[i + 1];
31693 ready[lastpos] = tmp;
31694 vec_pairing = 1;
31695 return cached_can_issue_more;
31700 /* We've either finished a vec/vecload pair, couldn't find an insn to
31701 continue the current pair, or the last insn had nothing to do
31702 with pairing. In any case, reset the state. */
31703 vec_pairing = 0;
31706 return cached_can_issue_more;
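/* Sketch (not part of GCC): the ready-list rotation used repeatedly
   above.  The scheduler issues from the *end* of READY, so moving the
   element at POS to index LASTPOS makes it the next insn chosen.  */
#if 0
static void
rotate_to_end_model (void **ready, int pos, int lastpos)
{
  void *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif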
31709 /* We are about to begin issuing insns for this clock cycle. */
31711 static int
31712 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31713 rtx_insn **ready ATTRIBUTE_UNUSED,
31714 int *pn_ready ATTRIBUTE_UNUSED,
31715 int clock_var ATTRIBUTE_UNUSED)
31717 int n_ready = *pn_ready;
31719 if (sched_verbose)
31720 fprintf (dump, "// rs6000_sched_reorder :\n");
31722 /* Reorder the ready list, if the second to last ready insn
31723 is a non-pipelined insn. */
31724 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31726 if (is_nonpipeline_insn (ready[n_ready - 1])
31727 && (recog_memoized (ready[n_ready - 2]) > 0))
31728 /* Simply swap first two insns. */
31729 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31732 if (rs6000_tune == PROCESSOR_POWER6)
31733 load_store_pendulum = 0;
31735 return rs6000_issue_rate ();
31738 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31740 static int
31741 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31742 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31744 if (sched_verbose)
31745 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31747 /* For Power6, we need to handle some special cases to try and keep the
31748 store queue from overflowing and triggering expensive flushes.
31750 This code monitors how load and store instructions are being issued
31751 and skews the ready list one way or the other to increase the likelihood
31752 that a desired instruction is issued at the proper time.
31754 A couple of things are done. First, we maintain a "load_store_pendulum"
31755 to track the current state of load/store issue.
31757 - If the pendulum is at zero, then no loads or stores have been
31758 issued in the current cycle so we do nothing.
31760 - If the pendulum is 1, then a single load has been issued in this
31761 cycle and we attempt to locate another load in the ready list to
31762 issue with it.
31764 - If the pendulum is -2, then two stores have already been
31765 issued in this cycle, so we increase the priority of the first load
31766 in the ready list to increase its likelihood of being chosen first
31767 in the next cycle.
31769 - If the pendulum is -1, then a single store has been issued in this
31770 cycle and we attempt to locate another store in the ready list to
31771 issue with it, preferring a store to an adjacent memory location to
31772 facilitate store pairing in the store queue.
31774 - If the pendulum is 2, then two loads have already been
31775 issued in this cycle, so we increase the priority of the first store
31776 in the ready list to increase its likelihood of being chosen first
31777 in the next cycle.
31779 - If the pendulum < -2 or > 2, then do nothing.
31781 Note: This code covers the most common scenarios. There exist non
31782 load/store instructions which make use of the LSU and which
31783 would need to be accounted for to strictly model the behavior
31784 of the machine. Those instructions are currently unaccounted
31785 for, to help minimize the compile-time overhead of this code. */
31787 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31789 int pos;
31790 int i;
31791 rtx_insn *tmp;
31792 rtx load_mem, str_mem;
31794 if (is_store_insn (last_scheduled_insn, &str_mem))
31795 /* Issuing a store, swing the load_store_pendulum to the left */
31796 load_store_pendulum--;
31797 else if (is_load_insn (last_scheduled_insn, &load_mem))
31798 /* Issuing a load, swing the load_store_pendulum to the right */
31799 load_store_pendulum++;
31800 else
31801 return cached_can_issue_more;
31803 /* If the pendulum is balanced, or there is only one instruction on
31804 the ready list, then all is well, so return. */
31805 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31806 return cached_can_issue_more;
31808 if (load_store_pendulum == 1)
31810 /* A load has been issued in this cycle. Scan the ready list
31811 for another load to issue with it */
31812 pos = *pn_ready-1;
31814 while (pos >= 0)
31816 if (is_load_insn (ready[pos], &load_mem))
31818 /* Found a load. Move it to the head of the ready list,
31819 and adjust its priority so that it is more likely to
31820 stay there */
31821 tmp = ready[pos];
31822 for (i=pos; i<*pn_ready-1; i++)
31823 ready[i] = ready[i + 1];
31824 ready[*pn_ready-1] = tmp;
31826 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31827 INSN_PRIORITY (tmp)++;
31828 break;
31830 pos--;
31833 else if (load_store_pendulum == -2)
31835 /* Two stores have been issued in this cycle. Increase the
31836 priority of the first load in the ready list to favor it for
31837 issuing in the next cycle. */
31838 pos = *pn_ready-1;
31840 while (pos >= 0)
31842 if (is_load_insn (ready[pos], &load_mem)
31843 && !sel_sched_p ()
31844 && INSN_PRIORITY_KNOWN (ready[pos]))
31846 INSN_PRIORITY (ready[pos])++;
31848 /* Adjust the pendulum to account for the fact that a load
31849 was found and increased in priority. This is to prevent
31850 increasing the priority of multiple loads */
31851 load_store_pendulum--;
31853 break;
31855 pos--;
31858 else if (load_store_pendulum == -1)
31860 /* A store has been issued in this cycle. Scan the ready list for
31861 another store to issue with it, preferring a store to an adjacent
31862 memory location */
31863 int first_store_pos = -1;
31865 pos = *pn_ready-1;
31867 while (pos >= 0)
31869 if (is_store_insn (ready[pos], &str_mem))
31871 rtx str_mem2;
31872 /* Maintain the index of the first store found on the
31873 list */
31874 if (first_store_pos == -1)
31875 first_store_pos = pos;
31877 if (is_store_insn (last_scheduled_insn, &str_mem2)
31878 && adjacent_mem_locations (str_mem, str_mem2))
31880 /* Found an adjacent store. Move it to the head of the
31881 ready list, and adjust its priority so that it is
31882 more likely to stay there */
31883 tmp = ready[pos];
31884 for (i=pos; i<*pn_ready-1; i++)
31885 ready[i] = ready[i + 1];
31886 ready[*pn_ready-1] = tmp;
31888 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31889 INSN_PRIORITY (tmp)++;
31891 first_store_pos = -1;
31893 break;
31896 pos--;
31899 if (first_store_pos >= 0)
31901 /* An adjacent store wasn't found, but a non-adjacent store was,
31902 so move the non-adjacent store to the front of the ready
31903 list, and adjust its priority so that it is more likely to
31904 stay there. */
31905 tmp = ready[first_store_pos];
31906 for (i=first_store_pos; i<*pn_ready-1; i++)
31907 ready[i] = ready[i + 1];
31908 ready[*pn_ready-1] = tmp;
31909 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31910 INSN_PRIORITY (tmp)++;
31913 else if (load_store_pendulum == 2)
31915 /* Two loads have been issued in this cycle. Increase the priority
31916 of the first store in the ready list to favor it for issuing in
31917 the next cycle. */
31918 pos = *pn_ready-1;
31920 while (pos >= 0)
31922 if (is_store_insn (ready[pos], &str_mem)
31923 && !sel_sched_p ()
31924 && INSN_PRIORITY_KNOWN (ready[pos]))
31926 INSN_PRIORITY (ready[pos])++;
31928 /* Adjust the pendulum to account for the fact that a store
31929 was found and increased in priority. This is to prevent
31930 increasing the priority of multiple stores */
31931 load_store_pendulum++;
31933 break;
31935 pos--;
31940 /* Do Power9-specific reordering if necessary. */
31941 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31942 && recog_memoized (last_scheduled_insn) >= 0)
31943 return power9_sched_reorder2 (ready, *pn_ready - 1);
31945 return cached_can_issue_more;
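/* Summary sketch (not part of GCC) of the Power6 pendulum actions
   implemented above, keyed by the pendulum value after the last issued
   insn (stores swing it negative, loads positive).  */
#if 0
static const char *
pendulum_action_model (int pendulum)
{
  switch (pendulum)
    {
    case 0:  return "balanced: nothing to do";
    case 1:  return "look for a second load to issue alongside";
    case -1: return "look for a second, preferably adjacent, store";
    case 2:  return "boost the first store on the ready list";
    case -2: return "boost the first load on the ready list";
    default: return "|pendulum| > 2: nothing to do";
    }
}
#endif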
31948 /* Return whether the presence of INSN causes a dispatch group termination
31949 of group WHICH_GROUP.
31951 If WHICH_GROUP == current_group, this function will return true if INSN
31952 causes the termination of the current group (i.e, the dispatch group to
31953 which INSN belongs). This means that INSN will be the last insn in the
31954 group it belongs to.
31956 If WHICH_GROUP == previous_group, this function will return true if INSN
31957 causes the termination of the previous group (i.e, the dispatch group that
31958 precedes the group to which INSN belongs). This means that INSN will be
31959 the first insn in the group it belongs to. */
31961 static bool
31962 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31964 bool first, last;
31966 if (! insn)
31967 return false;
31969 first = insn_must_be_first_in_group (insn);
31970 last = insn_must_be_last_in_group (insn);
31972 if (first && last)
31973 return true;
31975 if (which_group == current_group)
31976 return last;
31977 else if (which_group == previous_group)
31978 return first;
31980 return false;
31984 static bool
31985 insn_must_be_first_in_group (rtx_insn *insn)
31987 enum attr_type type;
31989 if (!insn
31990 || NOTE_P (insn)
31991 || DEBUG_INSN_P (insn)
31992 || GET_CODE (PATTERN (insn)) == USE
31993 || GET_CODE (PATTERN (insn)) == CLOBBER)
31994 return false;
31996 switch (rs6000_tune)
31998 case PROCESSOR_POWER5:
31999 if (is_cracked_insn (insn))
32000 return true;
32001 /* FALLTHRU */
32002 case PROCESSOR_POWER4:
32003 if (is_microcoded_insn (insn))
32004 return true;
32006 if (!rs6000_sched_groups)
32007 return false;
32009 type = get_attr_type (insn);
32011 switch (type)
32013 case TYPE_MFCR:
32014 case TYPE_MFCRF:
32015 case TYPE_MTCR:
32016 case TYPE_CR_LOGICAL:
32017 case TYPE_MTJMPR:
32018 case TYPE_MFJMPR:
32019 case TYPE_DIV:
32020 case TYPE_LOAD_L:
32021 case TYPE_STORE_C:
32022 case TYPE_ISYNC:
32023 case TYPE_SYNC:
32024 return true;
32025 default:
32026 break;
32028 break;
32029 case PROCESSOR_POWER6:
32030 type = get_attr_type (insn);
32032 switch (type)
32034 case TYPE_EXTS:
32035 case TYPE_CNTLZ:
32036 case TYPE_TRAP:
32037 case TYPE_MUL:
32038 case TYPE_INSERT:
32039 case TYPE_FPCOMPARE:
32040 case TYPE_MFCR:
32041 case TYPE_MTCR:
32042 case TYPE_MFJMPR:
32043 case TYPE_MTJMPR:
32044 case TYPE_ISYNC:
32045 case TYPE_SYNC:
32046 case TYPE_LOAD_L:
32047 case TYPE_STORE_C:
32048 return true;
32049 case TYPE_SHIFT:
32050 if (get_attr_dot (insn) == DOT_NO
32051 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32052 return true;
32053 else
32054 break;
32055 case TYPE_DIV:
32056 if (get_attr_size (insn) == SIZE_32)
32057 return true;
32058 else
32059 break;
32060 case TYPE_LOAD:
32061 case TYPE_STORE:
32062 case TYPE_FPLOAD:
32063 case TYPE_FPSTORE:
32064 if (get_attr_update (insn) == UPDATE_YES)
32065 return true;
32066 else
32067 break;
32068 default:
32069 break;
32071 break;
32072 case PROCESSOR_POWER7:
32073 type = get_attr_type (insn);
32075 switch (type)
32077 case TYPE_CR_LOGICAL:
32078 case TYPE_MFCR:
32079 case TYPE_MFCRF:
32080 case TYPE_MTCR:
32081 case TYPE_DIV:
32082 case TYPE_ISYNC:
32083 case TYPE_LOAD_L:
32084 case TYPE_STORE_C:
32085 case TYPE_MFJMPR:
32086 case TYPE_MTJMPR:
32087 return true;
32088 case TYPE_MUL:
32089 case TYPE_SHIFT:
32090 case TYPE_EXTS:
32091 if (get_attr_dot (insn) == DOT_YES)
32092 return true;
32093 else
32094 break;
32095 case TYPE_LOAD:
32096 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32097 || get_attr_update (insn) == UPDATE_YES)
32098 return true;
32099 else
32100 break;
32101 case TYPE_STORE:
32102 case TYPE_FPLOAD:
32103 case TYPE_FPSTORE:
32104 if (get_attr_update (insn) == UPDATE_YES)
32105 return true;
32106 else
32107 break;
32108 default:
32109 break;
32111 break;
32112 case PROCESSOR_POWER8:
32113 type = get_attr_type (insn);
32115 switch (type)
32117 case TYPE_CR_LOGICAL:
32118 case TYPE_MFCR:
32119 case TYPE_MFCRF:
32120 case TYPE_MTCR:
32121 case TYPE_SYNC:
32122 case TYPE_ISYNC:
32123 case TYPE_LOAD_L:
32124 case TYPE_STORE_C:
32125 case TYPE_VECSTORE:
32126 case TYPE_MFJMPR:
32127 case TYPE_MTJMPR:
32128 return true;
32129 case TYPE_SHIFT:
32130 case TYPE_EXTS:
32131 case TYPE_MUL:
32132 if (get_attr_dot (insn) == DOT_YES)
32133 return true;
32134 else
32135 break;
32136 case TYPE_LOAD:
32137 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32138 || get_attr_update (insn) == UPDATE_YES)
32139 return true;
32140 else
32141 break;
32142 case TYPE_STORE:
32143 if (get_attr_update (insn) == UPDATE_YES
32144 && get_attr_indexed (insn) == INDEXED_YES)
32145 return true;
32146 else
32147 break;
32148 default:
32149 break;
32151 break;
32152 default:
32153 break;
32156 return false;
32159 static bool
32160 insn_must_be_last_in_group (rtx_insn *insn)
32162 enum attr_type type;
32164 if (!insn
32165 || NOTE_P (insn)
32166 || DEBUG_INSN_P (insn)
32167 || GET_CODE (PATTERN (insn)) == USE
32168 || GET_CODE (PATTERN (insn)) == CLOBBER)
32169 return false;
32171 switch (rs6000_tune) {
32172 case PROCESSOR_POWER4:
32173 case PROCESSOR_POWER5:
32174 if (is_microcoded_insn (insn))
32175 return true;
32177 if (is_branch_slot_insn (insn))
32178 return true;
32180 break;
32181 case PROCESSOR_POWER6:
32182 type = get_attr_type (insn);
32184 switch (type)
32186 case TYPE_EXTS:
32187 case TYPE_CNTLZ:
32188 case TYPE_TRAP:
32189 case TYPE_MUL:
32190 case TYPE_FPCOMPARE:
32191 case TYPE_MFCR:
32192 case TYPE_MTCR:
32193 case TYPE_MFJMPR:
32194 case TYPE_MTJMPR:
32195 case TYPE_ISYNC:
32196 case TYPE_SYNC:
32197 case TYPE_LOAD_L:
32198 case TYPE_STORE_C:
32199 return true;
32200 case TYPE_SHIFT:
32201 if (get_attr_dot (insn) == DOT_NO
32202 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32203 return true;
32204 else
32205 break;
32206 case TYPE_DIV:
32207 if (get_attr_size (insn) == SIZE_32)
32208 return true;
32209 else
32210 break;
32211 default:
32212 break;
32214 break;
32215 case PROCESSOR_POWER7:
32216 type = get_attr_type (insn);
32218 switch (type)
32220 case TYPE_ISYNC:
32221 case TYPE_SYNC:
32222 case TYPE_LOAD_L:
32223 case TYPE_STORE_C:
32224 return true;
32225 case TYPE_LOAD:
32226 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32227 && get_attr_update (insn) == UPDATE_YES)
32228 return true;
32229 else
32230 break;
32231 case TYPE_STORE:
32232 if (get_attr_update (insn) == UPDATE_YES
32233 && get_attr_indexed (insn) == INDEXED_YES)
32234 return true;
32235 else
32236 break;
32237 default:
32238 break;
32240 break;
32241 case PROCESSOR_POWER8:
32242 type = get_attr_type (insn);
32244 switch (type)
32246 case TYPE_MFCR:
32247 case TYPE_MTCR:
32248 case TYPE_ISYNC:
32249 case TYPE_SYNC:
32250 case TYPE_LOAD_L:
32251 case TYPE_STORE_C:
32252 return true;
32253 case TYPE_LOAD:
32254 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32255 && get_attr_update (insn) == UPDATE_YES)
32256 return true;
32257 else
32258 break;
32259 case TYPE_STORE:
32260 if (get_attr_update (insn) == UPDATE_YES
32261 && get_attr_indexed (insn) == INDEXED_YES)
32262 return true;
32263 else
32264 break;
32265 default:
32266 break;
32268 break;
32269 default:
32270 break;
32273 return false;
32276 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32277 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32279 static bool
32280 is_costly_group (rtx *group_insns, rtx next_insn)
32282 int i;
32283 int issue_rate = rs6000_issue_rate ();
32285 for (i = 0; i < issue_rate; i++)
32287 sd_iterator_def sd_it;
32288 dep_t dep;
32289 rtx insn = group_insns[i];
32291 if (!insn)
32292 continue;
32294 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32296 rtx next = DEP_CON (dep);
32298 if (next == next_insn
32299 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32300 return true;
32304 return false;
32307 /* Utility of the function redefine_groups.
32308 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32309 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32310 to keep it "far" (in a separate group) from GROUP_INSNS, following
32311 one of the following schemes, depending on the value of the flag
32312 -minsert-sched-nops=X:
32313 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32314 in order to force NEXT_INSN into a separate group.
32315 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32316 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32317 insertion (has a group just ended, how many vacant issue slots remain in the
32318 last group, and how many dispatch groups were encountered so far). */
32320 static int
32321 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32322 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32323 int *group_count)
32325 rtx nop;
32326 bool force;
32327 int issue_rate = rs6000_issue_rate ();
32328 bool end = *group_end;
32329 int i;
32331 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32332 return can_issue_more;
32334 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32335 return can_issue_more;
32337 force = is_costly_group (group_insns, next_insn);
32338 if (!force)
32339 return can_issue_more;
32341 if (sched_verbose > 6)
32342 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
32343 *group_count, can_issue_more);
32345 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32347 if (*group_end)
32348 can_issue_more = 0;
32350 /* Since only a branch can be issued in the last issue_slot, it is
32351 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32352 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32353 in this case the last nop will start a new group and the branch
32354 will be forced to the new group. */
32355 if (can_issue_more && !is_branch_slot_insn (next_insn))
32356 can_issue_more--;
32358 /* Do we have a special group ending nop? */
32359 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32360 || rs6000_tune == PROCESSOR_POWER8)
32362 nop = gen_group_ending_nop ();
32363 emit_insn_before (nop, next_insn);
32364 can_issue_more = 0;
32366 else
32367 while (can_issue_more > 0)
32369 nop = gen_nop ();
32370 emit_insn_before (nop, next_insn);
32371 can_issue_more--;
32374 *group_end = true;
32375 return 0;
32378 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32380 int n_nops = rs6000_sched_insert_nops;
32382 /* Nops can't be issued from the branch slot, so the effective
32383 issue_rate for nops is 'issue_rate - 1'. */
32384 if (can_issue_more == 0)
32385 can_issue_more = issue_rate;
32386 can_issue_more--;
32387 if (can_issue_more == 0)
32389 can_issue_more = issue_rate - 1;
32390 (*group_count)++;
32391 end = true;
32392 for (i = 0; i < issue_rate; i++)
32394 group_insns[i] = 0;
32398 while (n_nops > 0)
32400 nop = gen_nop ();
32401 emit_insn_before (nop, next_insn);
32402 if (can_issue_more == issue_rate - 1) /* new group begins */
32403 end = false;
32404 can_issue_more--;
32405 if (can_issue_more == 0)
32407 can_issue_more = issue_rate - 1;
32408 (*group_count)++;
32409 end = true;
32410 for (i = 0; i < issue_rate; i++)
32412 group_insns[i] = 0;
32415 n_nops--;
32418 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32419 can_issue_more++;
32421 /* Is next_insn going to start a new group? */
32422 *group_end
32423 = (end
32424 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32425 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32426 || (can_issue_more < issue_rate &&
32427 insn_terminates_group_p (next_insn, previous_group)));
32428 if (*group_end && end)
32429 (*group_count)--;
32431 if (sched_verbose > 6)
32432 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32433 *group_count, can_issue_more);
32434 return can_issue_more;
32437 return can_issue_more;
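/* Sketch (not part of GCC): the slot countdown for the fixed-count
   scheme above, simplified.  Nops are consumed at an effective rate of
   issue_rate - 1 because a nop can never occupy the branch slot;
   GROUPS_STARTED counts how many fresh groups the nops opened.
   Assumes CAN_ISSUE_MORE is already positive on entry.  */
#if 0
static int
nop_countdown_model (int can_issue_more, int issue_rate, int n_nops,
		     int *groups_started)
{
  *groups_started = 0;
  while (n_nops-- > 0)
    if (--can_issue_more == 0)
      {
	can_issue_more = issue_rate - 1;
	(*groups_started)++;
      }
  return can_issue_more;
}
#endif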
32440 /* This function tries to synch the dispatch groups that the compiler "sees"
32441 with the dispatch groups that the processor dispatcher is expected to
32442 form in practice. It tries to achieve this synchronization by forcing the
32443 estimated processor grouping on the compiler (as opposed to the function
32444 'pad_groups', which tries to force the scheduler's grouping on the processor).
32446 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32447 examines the (estimated) dispatch groups that will be formed by the processor
32448 dispatcher. It marks these group boundaries to reflect the estimated
32449 processor grouping, overriding the grouping that the scheduler had marked.
32450 Depending on the value of the flag '-minsert-sched-nops' this function can
32451 force certain insns into separate groups or force a certain distance between
32452 them by inserting nops, for example, if there exists a "costly dependence"
32453 between the insns.
32455 The function estimates the group boundaries that the processor will form as
32456 follows: It keeps track of how many vacant issue slots are available after
32457 each insn. A subsequent insn will start a new group if one of the following
32458 4 cases applies:
32459 - no more vacant issue slots remain in the current dispatch group.
32460 - only the last issue slot, which is the branch slot, is vacant, but the next
32461 insn is not a branch.
32462 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32463 which means that a cracked insn (which occupies two issue slots) can't be
32464 issued in this group.
32465 - less than 'issue_rate' slots are vacant, and the next insn always needs to
32466 start a new group. */
32468 static int
32469 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32470 rtx_insn *tail)
32472 rtx_insn *insn, *next_insn;
32473 int issue_rate;
32474 int can_issue_more;
32475 int slot, i;
32476 bool group_end;
32477 int group_count = 0;
32478 rtx *group_insns;
32480 /* Initialize. */
32481 issue_rate = rs6000_issue_rate ();
32482 group_insns = XALLOCAVEC (rtx, issue_rate);
32483 for (i = 0; i < issue_rate; i++)
32485 group_insns[i] = 0;
32487 can_issue_more = issue_rate;
32488 slot = 0;
32489 insn = get_next_active_insn (prev_head_insn, tail);
32490 group_end = false;
32492 while (insn != NULL_RTX)
32494 slot = (issue_rate - can_issue_more);
32495 group_insns[slot] = insn;
32496 can_issue_more =
32497 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32498 if (insn_terminates_group_p (insn, current_group))
32499 can_issue_more = 0;
32501 next_insn = get_next_active_insn (insn, tail);
32502 if (next_insn == NULL_RTX)
32503 return group_count + 1;
32505 /* Is next_insn going to start a new group? */
32506 group_end
32507 = (can_issue_more == 0
32508 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32509 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32510 || (can_issue_more < issue_rate &&
32511 insn_terminates_group_p (next_insn, previous_group)));
32513 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32514 next_insn, &group_end, can_issue_more,
32515 &group_count);
32517 if (group_end)
32519 group_count++;
32520 can_issue_more = 0;
32521 for (i = 0; i < issue_rate; i++)
32523 group_insns[i] = 0;
32527 if (GET_MODE (next_insn) == TImode && can_issue_more)
32528 PUT_MODE (next_insn, VOIDmode);
32529 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32530 PUT_MODE (next_insn, TImode);
32532 insn = next_insn;
32533 if (can_issue_more == 0)
32534 can_issue_more = issue_rate;
32535 } /* while */
32537 return group_count;
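/* Sketch (not part of GCC): the four "next insn starts a new group"
   conditions used in redefine_groups (and in force_new_group above),
   over plain flags.  */
#if 0
static bool
starts_new_group_model (int slots_left, int issue_rate,
			bool next_is_branch, bool next_is_cracked,
			bool next_must_be_first)
{
  return slots_left == 0
	 || (slots_left == 1 && !next_is_branch)
	 || (slots_left <= 2 && next_is_cracked)
	 || (slots_left < issue_rate && next_must_be_first);
}
#endif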
32540 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32541 dispatch group boundaries that the scheduler had marked. Pad with nops
32542 any dispatch groups which have vacant issue slots, in order to force the
32543 scheduler's grouping on the processor dispatcher. The function
32544 returns the number of dispatch groups found. */
32546 static int
32547 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32548 rtx_insn *tail)
32550 rtx_insn *insn, *next_insn;
32551 rtx nop;
32552 int issue_rate;
32553 int can_issue_more;
32554 int group_end;
32555 int group_count = 0;
32557 /* Initialize issue_rate. */
32558 issue_rate = rs6000_issue_rate ();
32559 can_issue_more = issue_rate;
32561 insn = get_next_active_insn (prev_head_insn, tail);
32562 next_insn = get_next_active_insn (insn, tail);
32564 while (insn != NULL_RTX)
32566 can_issue_more =
32567 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32569 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32571 if (next_insn == NULL_RTX)
32572 break;
32574 if (group_end)
32576 /* If the scheduler had marked group termination at this location
32577 (between insn and next_insn), and neither insn nor next_insn will
32578 force group termination, pad the group with nops to force group
32579 termination. */
32580 if (can_issue_more
32581 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32582 && !insn_terminates_group_p (insn, current_group)
32583 && !insn_terminates_group_p (next_insn, previous_group))
32585 if (!is_branch_slot_insn (next_insn))
32586 can_issue_more--;
32588 while (can_issue_more)
32590 nop = gen_nop ();
32591 emit_insn_before (nop, next_insn);
32592 can_issue_more--;
32596 can_issue_more = issue_rate;
32597 group_count++;
32600 insn = next_insn;
32601 next_insn = get_next_active_insn (insn, tail);
32604 return group_count;
32607 /* We're beginning a new block. Initialize data structures as necessary. */
32609 static void
32610 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32611 int sched_verbose ATTRIBUTE_UNUSED,
32612 int max_ready ATTRIBUTE_UNUSED)
32614 last_scheduled_insn = NULL;
32615 load_store_pendulum = 0;
32616 divide_cnt = 0;
32617 vec_pairing = 0;
32620 /* The following function is called at the end of scheduling BB.
32621 After reload, it inserts nops to enforce insn group bundling. */
32623 static void
32624 rs6000_sched_finish (FILE *dump, int sched_verbose)
32626 int n_groups;
32628 if (sched_verbose)
32629 fprintf (dump, "=== Finishing schedule.\n");
32631 if (reload_completed && rs6000_sched_groups)
32633 /* Do not run sched_finish hook when selective scheduling enabled. */
32634 if (sel_sched_p ())
32635 return;
32637 if (rs6000_sched_insert_nops == sched_finish_none)
32638 return;
32640 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32641 n_groups = pad_groups (dump, sched_verbose,
32642 current_sched_info->prev_head,
32643 current_sched_info->next_tail);
32644 else
32645 n_groups = redefine_groups (dump, sched_verbose,
32646 current_sched_info->prev_head,
32647 current_sched_info->next_tail);
32649 if (sched_verbose >= 6)
32651 fprintf (dump, "ngroups = %d\n", n_groups);
32652 print_rtl (dump, current_sched_info->prev_head);
32653 fprintf (dump, "Done finish_sched\n");
32658 struct rs6000_sched_context
32660 short cached_can_issue_more;
32661 rtx_insn *last_scheduled_insn;
32662 int load_store_pendulum;
32663 int divide_cnt;
32664 int vec_pairing;
32667 typedef struct rs6000_sched_context rs6000_sched_context_def;
32668 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32670 /* Allocate store for new scheduling context. */
32671 static void *
32672 rs6000_alloc_sched_context (void)
32674 return xmalloc (sizeof (rs6000_sched_context_def));
32677 /* If CLEAN_P is true, initialize _SC with clean data;
32678 otherwise initialize it from the global context. */
32679 static void
32680 rs6000_init_sched_context (void *_sc, bool clean_p)
32682 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32684 if (clean_p)
32686 sc->cached_can_issue_more = 0;
32687 sc->last_scheduled_insn = NULL;
32688 sc->load_store_pendulum = 0;
32689 sc->divide_cnt = 0;
32690 sc->vec_pairing = 0;
32692 else
32694 sc->cached_can_issue_more = cached_can_issue_more;
32695 sc->last_scheduled_insn = last_scheduled_insn;
32696 sc->load_store_pendulum = load_store_pendulum;
32697 sc->divide_cnt = divide_cnt;
32698 sc->vec_pairing = vec_pairing;
32702 /* Sets the global scheduling context to the one pointed to by _SC. */
32703 static void
32704 rs6000_set_sched_context (void *_sc)
32706 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32708 gcc_assert (sc != NULL);
32710 cached_can_issue_more = sc->cached_can_issue_more;
32711 last_scheduled_insn = sc->last_scheduled_insn;
32712 load_store_pendulum = sc->load_store_pendulum;
32713 divide_cnt = sc->divide_cnt;
32714 vec_pairing = sc->vec_pairing;
32717 /* Free _SC. */
32718 static void
32719 rs6000_free_sched_context (void *_sc)
32721 gcc_assert (_sc != NULL);
32723 free (_sc);
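/* Illustrative sketch (not part of rs6000.c): how the four hooks above fit
   together.  The selective scheduler checkpoints the global scheduling
   state, explores an alternative schedule, and rolls back.  The wrapper
   name is hypothetical.  */
static void
sched_context_round_trip_example (void)
{
  void *ctx = rs6000_alloc_sched_context ();
  rs6000_init_sched_context (ctx, false);	/* snapshot current globals */
  /* ... scheduling proceeds and mutates the globals here ... */
  rs6000_set_sched_context (ctx);		/* restore the snapshot */
  rs6000_free_sched_context (ctx);
}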
32726 static bool
32727 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32729 switch (get_attr_type (insn))
32731 case TYPE_DIV:
32732 case TYPE_SDIV:
32733 case TYPE_DDIV:
32734 case TYPE_VECDIV:
32735 case TYPE_SSQRT:
32736 case TYPE_DSQRT:
32737 return false;
32739 default:
32740 return true;
32744 /* Length in units of the trampoline for entering a nested function. */
32746 int
32747 rs6000_trampoline_size (void)
32749 int ret = 0;
32751 switch (DEFAULT_ABI)
32753 default:
32754 gcc_unreachable ();
32756 case ABI_AIX:
32757 ret = (TARGET_32BIT) ? 12 : 24;
32758 break;
32760 case ABI_ELFv2:
32761 gcc_assert (!TARGET_32BIT);
32762 ret = 32;
32763 break;
32765 case ABI_DARWIN:
32766 case ABI_V4:
32767 ret = (TARGET_32BIT) ? 40 : 48;
32768 break;
32771 return ret;
32774 /* Emit RTL insns to initialize the variable parts of a trampoline.
32775 FNADDR is an RTX for the address of the function's pure code.
32776 CXT is an RTX for the static chain value for the function. */
32778 static void
32779 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32781 int regsize = (TARGET_32BIT) ? 4 : 8;
32782 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32783 rtx ctx_reg = force_reg (Pmode, cxt);
32784 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32786 switch (DEFAULT_ABI)
32788 default:
32789 gcc_unreachable ();
32791 /* Under AIX, just build the 3-word function descriptor.  */
32792 case ABI_AIX:
32794 rtx fnmem, fn_reg, toc_reg;
32796 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32797 error ("you cannot take the address of a nested function if you use "
32798 "the %qs option", "-mno-pointers-to-nested-functions");
32800 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32801 fn_reg = gen_reg_rtx (Pmode);
32802 toc_reg = gen_reg_rtx (Pmode);
32804 /* Macro to shorten the code expansions below. */
32805 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32807 m_tramp = replace_equiv_address (m_tramp, addr);
32809 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32810 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32811 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32812 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32813 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32815 # undef MEM_PLUS
32817 break;
32819 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32820 case ABI_ELFv2:
32821 case ABI_DARWIN:
32822 case ABI_V4:
32823 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32824 LCT_NORMAL, VOIDmode,
32825 addr, Pmode,
32826 GEN_INT (rs6000_trampoline_size ()), SImode,
32827 fnaddr, Pmode,
32828 ctx_reg, Pmode);
32829 break;
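/* Illustrative sketch (not part of rs6000.c): the 3-word trampoline that
   the ABI_AIX arm above fills in, expressed as a C struct.  Each word is
   4 bytes under -m32 and 8 bytes under -m64 (regsize above).  */
struct aix_trampoline_example
{
  void *entry;		/* word 0: function entry, from the descriptor */
  void *toc;		/* word 1: TOC pointer, from the descriptor */
  void *static_chain;	/* word 2: CXT, the static chain value */
};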
32834 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32835 identifier as an argument, so the front end shouldn't look it up. */
32837 static bool
32838 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32840 return is_attribute_p ("altivec", attr_id);
32843 /* Handle the "altivec" attribute. The attribute may have
32844 arguments as follows:
32846 __attribute__((altivec(vector__)))
32847 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32848 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32850 and may appear more than once (e.g., 'vector bool char') in a
32851 given declaration. */
32853 static tree
32854 rs6000_handle_altivec_attribute (tree *node,
32855 tree name ATTRIBUTE_UNUSED,
32856 tree args,
32857 int flags ATTRIBUTE_UNUSED,
32858 bool *no_add_attrs)
32860 tree type = *node, result = NULL_TREE;
32861 machine_mode mode;
32862 int unsigned_p;
32863 char altivec_type
32864 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32865 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32866 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32867 : '?');
32869 while (POINTER_TYPE_P (type)
32870 || TREE_CODE (type) == FUNCTION_TYPE
32871 || TREE_CODE (type) == METHOD_TYPE
32872 || TREE_CODE (type) == ARRAY_TYPE)
32873 type = TREE_TYPE (type);
32875 mode = TYPE_MODE (type);
32877 /* Check for invalid AltiVec type qualifiers. */
32878 if (type == long_double_type_node)
32879 error ("use of %<long double%> in AltiVec types is invalid");
32880 else if (type == boolean_type_node)
32881 error ("use of boolean types in AltiVec types is invalid");
32882 else if (TREE_CODE (type) == COMPLEX_TYPE)
32883 error ("use of %<complex%> in AltiVec types is invalid");
32884 else if (DECIMAL_FLOAT_MODE_P (mode))
32885 error ("use of decimal floating point types in AltiVec types is invalid");
32886 else if (!TARGET_VSX)
32888 if (type == long_unsigned_type_node || type == long_integer_type_node)
32890 if (TARGET_64BIT)
32891 error ("use of %<long%> in AltiVec types is invalid for "
32892 "64-bit code without %qs", "-mvsx");
32893 else if (rs6000_warn_altivec_long)
32894 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32895 "use %<int%>");
32897 else if (type == long_long_unsigned_type_node
32898 || type == long_long_integer_type_node)
32899 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32900 "-mvsx");
32901 else if (type == double_type_node)
32902 error ("use of %<double%> in AltiVec types is invalid without %qs",
32903 "-mvsx");
32906 switch (altivec_type)
32908 case 'v':
32909 unsigned_p = TYPE_UNSIGNED (type);
32910 switch (mode)
32912 case E_TImode:
32913 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32914 break;
32915 case E_DImode:
32916 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32917 break;
32918 case E_SImode:
32919 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32920 break;
32921 case E_HImode:
32922 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32923 break;
32924 case E_QImode:
32925 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32926 break;
32927 case E_SFmode: result = V4SF_type_node; break;
32928 case E_DFmode: result = V2DF_type_node; break;
32929 /* If the user says 'vector int bool', we may be handed the 'bool'
32930 attribute _before_ the 'vector' attribute, and so select the
32931 proper type in the 'b' case below. */
32932 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32933 case E_V2DImode: case E_V2DFmode:
32934 result = type;
32935 default: break;
32937 break;
32938 case 'b':
32939 switch (mode)
32941 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32942 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32943 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32944 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32945 default: break;
32947 break;
32948 case 'p':
32949 switch (mode)
32951 case E_V8HImode: result = pixel_V8HI_type_node;
32952 default: break;
32954 default: break;
32957 /* Propagate qualifiers attached to the element type
32958 onto the vector type. */
32959 if (result && result != type && TYPE_QUALS (type))
32960 result = build_qualified_type (result, TYPE_QUALS (type));
32962 *no_add_attrs = true; /* No need to hang on to the attribute. */
32964 if (result)
32965 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32967 return NULL_TREE;
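/* Illustrative sketch (not part of rs6000.c): the source-level spellings
   the handler above receives.  The AltiVec vector/bool/pixel keywords
   expand to these attributes; the typedef names are hypothetical.  */
typedef __attribute__ ((altivec (vector__))) int v4si_example;
typedef __attribute__ ((altivec (vector__)))
	__attribute__ ((altivec (bool__))) unsigned int vbool_int_example;
typedef __attribute__ ((altivec (vector__)))
	__attribute__ ((altivec (pixel__))) unsigned short vpixel_example;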
32970 /* AltiVec defines four built-in scalar types that serve as vector
32971 elements; we must teach the compiler how to mangle them. */
32973 static const char *
32974 rs6000_mangle_type (const_tree type)
32976 type = TYPE_MAIN_VARIANT (type);
32978 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32979 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32980 return NULL;
32982 if (type == bool_char_type_node) return "U6__boolc";
32983 if (type == bool_short_type_node) return "U6__bools";
32984 if (type == pixel_type_node) return "u7__pixel";
32985 if (type == bool_int_type_node) return "U6__booli";
32986 if (type == bool_long_type_node) return "U6__booll";
32988 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32989 "g" for IBM extended double, no matter whether it is long double (using
32990 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32991 if (TARGET_FLOAT128_TYPE)
32993 if (type == ieee128_float_type_node)
32994 return "U10__float128";
32996 if (TARGET_LONG_DOUBLE_128)
32998 if (type == long_double_type_node)
32999 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
33001 if (type == ibm128_float_type_node)
33002 return "g";
33006 /* Mangle IBM extended float long double as `g' (__float128) on
33007 powerpc*-linux where long-double-64 previously was the default. */
33008 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
33009 && TARGET_ELF
33010 && TARGET_LONG_DOUBLE_128
33011 && !TARGET_IEEEQUAD)
33012 return "g";
33014 /* For all other types, use normal C++ mangling. */
33015 return NULL;
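/* Illustrative sketch (not part of rs6000.c): the fixed manglings the
   function above returns, gathered into one table for reference.  */
static const struct { const char *type; const char *mangled; }
mangle_type_examples[] = {
  { "__bool char",			"U6__boolc" },
  { "__bool short",			"U6__bools" },
  { "__pixel",				"u7__pixel" },
  { "__bool int",			"U6__booli" },
  { "__bool long",			"U6__booll" },
  { "__float128 / IEEE long double",	"U10__float128" },
  { "IBM extended double",		"g" },
};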
33018 /* Handle a "longcall" or "shortcall" attribute; arguments as in
33019 struct attribute_spec.handler. */
33021 static tree
33022 rs6000_handle_longcall_attribute (tree *node, tree name,
33023 tree args ATTRIBUTE_UNUSED,
33024 int flags ATTRIBUTE_UNUSED,
33025 bool *no_add_attrs)
33027 if (TREE_CODE (*node) != FUNCTION_TYPE
33028 && TREE_CODE (*node) != FIELD_DECL
33029 && TREE_CODE (*node) != TYPE_DECL)
33031 warning (OPT_Wattributes, "%qE attribute only applies to functions",
33032 name);
33033 *no_add_attrs = true;
33036 return NULL_TREE;
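/* Illustrative sketch (not part of rs6000.c): declarations the handler
   above accepts.  Applied to anything but a function it warns and drops
   the attribute.  */
extern void far_callee (void) __attribute__ ((longcall));
extern void near_callee (void) __attribute__ ((shortcall));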
33039 /* Set longcall attributes on all functions declared when
33040 rs6000_default_long_calls is true. */
33041 static void
33042 rs6000_set_default_type_attributes (tree type)
33044 if (rs6000_default_long_calls
33045 && (TREE_CODE (type) == FUNCTION_TYPE
33046 || TREE_CODE (type) == METHOD_TYPE))
33047 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
33048 NULL_TREE,
33049 TYPE_ATTRIBUTES (type));
33051 #if TARGET_MACHO
33052 darwin_set_default_type_attributes (type);
33053 #endif
33056 /* Return a reference suitable for calling a function with the
33057 longcall attribute. */
33059 rtx
33060 rs6000_longcall_ref (rtx call_ref)
33062 const char *call_name;
33063 tree node;
33065 if (GET_CODE (call_ref) != SYMBOL_REF)
33066 return call_ref;
33068 /* System V adds '.' to the internal name, so skip those characters. */
33069 call_name = XSTR (call_ref, 0);
33070 if (*call_name == '.')
33072 while (*call_name == '.')
33073 call_name++;
33075 node = get_identifier (call_name);
33076 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
33079 return force_reg (Pmode, call_ref);
33082 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
33083 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
33084 #endif
33086 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
33087 struct attribute_spec.handler. */
33088 static tree
33089 rs6000_handle_struct_attribute (tree *node, tree name,
33090 tree args ATTRIBUTE_UNUSED,
33091 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
33093 tree *type = NULL;
33094 if (DECL_P (*node))
33096 if (TREE_CODE (*node) == TYPE_DECL)
33097 type = &TREE_TYPE (*node);
33099 else
33100 type = node;
33102 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
33103 || TREE_CODE (*type) == UNION_TYPE)))
33105 warning (OPT_Wattributes, "%qE attribute ignored", name);
33106 *no_add_attrs = true;
33109 else if ((is_attribute_p ("ms_struct", name)
33110 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
33111 || ((is_attribute_p ("gcc_struct", name)
33112 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
33114 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
33115 name);
33116 *no_add_attrs = true;
33119 return NULL_TREE;
33122 static bool
33123 rs6000_ms_bitfield_layout_p (const_tree record_type)
33125 return (TARGET_USE_MS_BITFIELD_LAYOUT
33126 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
33127 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
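/* Illustrative sketch (not part of rs6000.c): the mutually exclusive
   layout attributes checked above.  Under ms_struct a bit-field whose
   underlying type changes starts a new storage unit, so the two structs
   below can lay out differently.  */
struct __attribute__ ((ms_struct)) ms_layout_example
{
  char c : 4;
  int i : 4;		/* allocated in a fresh 'int' unit */
};
struct __attribute__ ((gcc_struct)) gcc_layout_example
{
  char c : 4;
  int i : 4;		/* packed adjacent to 'c' */
};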
33130 #ifdef USING_ELFOS_H
33132 /* A get_unnamed_section callback, used for switching to toc_section. */
33134 static void
33135 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33137 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33138 && TARGET_MINIMAL_TOC)
33140 if (!toc_initialized)
33142 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33143 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33144 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33145 fprintf (asm_out_file, "\t.tc ");
33146 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33147 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33148 fprintf (asm_out_file, "\n");
33150 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33151 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33152 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33153 fprintf (asm_out_file, " = .+32768\n");
33154 toc_initialized = 1;
33156 else
33157 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33159 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33161 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33162 if (!toc_initialized)
33164 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33165 toc_initialized = 1;
33168 else
33170 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33171 if (!toc_initialized)
33173 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33174 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33175 fprintf (asm_out_file, " = .+32768\n");
33176 toc_initialized = 1;
33181 /* Implement TARGET_ASM_INIT_SECTIONS. */
33183 static void
33184 rs6000_elf_asm_init_sections (void)
33186 toc_section
33187 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33189 sdata2_section
33190 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33191 SDATA2_SECTION_ASM_OP);
33194 /* Implement TARGET_SELECT_RTX_SECTION. */
33196 static section *
33197 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33198 unsigned HOST_WIDE_INT align)
33200 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33201 return toc_section;
33202 else
33203 return default_elf_select_rtx_section (mode, x, align);
33206 /* For a SYMBOL_REF, set generic flags and then perform some
33207 target-specific processing.
33209 When the AIX ABI is requested on a non-AIX system, replace the
33210 function name with the real name (with a leading .) rather than the
33211 function descriptor name. This saves a lot of overriding code to
33212 read the prefixes. */
33214 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33215 static void
33216 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33218 default_encode_section_info (decl, rtl, first);
33220 if (first
33221 && TREE_CODE (decl) == FUNCTION_DECL
33222 && !TARGET_AIX
33223 && DEFAULT_ABI == ABI_AIX)
33225 rtx sym_ref = XEXP (rtl, 0);
33226 size_t len = strlen (XSTR (sym_ref, 0));
33227 char *str = XALLOCAVEC (char, len + 2);
33228 str[0] = '.';
33229 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33230 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33234 static inline bool
33235 compare_section_name (const char *section, const char *templ)
33237 int len;
33239 len = strlen (templ);
33240 return (strncmp (section, templ, len) == 0
33241 && (section[len] == 0 || section[len] == '.'));
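/* Illustrative sketch (not part of rs6000.c): compare_section_name matches
   the template exactly, or the template followed by a '.' subsection
   suffix, but not a longer name such as ".sdata2" against ".sdata".  */
static void
compare_section_name_examples (void)
{
  gcc_assert (compare_section_name (".sdata", ".sdata"));
  gcc_assert (compare_section_name (".sdata.cold", ".sdata"));
  gcc_assert (!compare_section_name (".sdata2", ".sdata"));
}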
33244 bool
33245 rs6000_elf_in_small_data_p (const_tree decl)
33247 if (rs6000_sdata == SDATA_NONE)
33248 return false;
33250 /* We want to merge strings, so we never consider them small data. */
33251 if (TREE_CODE (decl) == STRING_CST)
33252 return false;
33254 /* Functions are never in the small data area. */
33255 if (TREE_CODE (decl) == FUNCTION_DECL)
33256 return false;
33258 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33260 const char *section = DECL_SECTION_NAME (decl);
33261 if (compare_section_name (section, ".sdata")
33262 || compare_section_name (section, ".sdata2")
33263 || compare_section_name (section, ".gnu.linkonce.s")
33264 || compare_section_name (section, ".sbss")
33265 || compare_section_name (section, ".sbss2")
33266 || compare_section_name (section, ".gnu.linkonce.sb")
33267 || strcmp (section, ".PPC.EMB.sdata0") == 0
33268 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33269 return true;
33271 else
33273 /* If we are told not to put readonly data in sdata, then don't. */
33274 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33275 && !rs6000_readonly_in_sdata)
33276 return false;
33278 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33280 if (size > 0
33281 && size <= g_switch_value
33282 /* If it's not public, and we're not going to reference it there,
33283 there's no need to put it in the small data section. */
33284 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33285 return true;
33288 return false;
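/* Illustrative sketch (not part of rs6000.c): declarations the predicate
   above would classify as small data, assuming -msdata is active and the
   -G threshold is at least 4.  The variable names are hypothetical.  */
static int small_counter_example;	/* small by size vs. g_switch_value */
int placed_example __attribute__ ((section (".sdata")));  /* by section name */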
33291 #endif /* USING_ELFOS_H */
33293 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33295 static bool
33296 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33298 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33301 /* Do not place thread-local symbols refs in the object blocks. */
33303 static bool
33304 rs6000_use_blocks_for_decl_p (const_tree decl)
33306 return !DECL_THREAD_LOCAL_P (decl);
33309 /* Return a REG that occurs in ADDR with coefficient 1.
33310 ADDR can be effectively incremented by incrementing REG.
33312 r0 is special and we must not select it as an address
33313 register by this routine since our caller will try to
33314 increment the returned register via an "la" instruction. */
33317 find_addr_reg (rtx addr)
33319 while (GET_CODE (addr) == PLUS)
33321 if (GET_CODE (XEXP (addr, 0)) == REG
33322 && REGNO (XEXP (addr, 0)) != 0)
33323 addr = XEXP (addr, 0);
33324 else if (GET_CODE (XEXP (addr, 1)) == REG
33325 && REGNO (XEXP (addr, 1)) != 0)
33326 addr = XEXP (addr, 1);
33327 else if (CONSTANT_P (XEXP (addr, 0)))
33328 addr = XEXP (addr, 1);
33329 else if (CONSTANT_P (XEXP (addr, 1)))
33330 addr = XEXP (addr, 0);
33331 else
33332 gcc_unreachable ();
33334 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33335 return addr;
33338 void
33339 rs6000_fatal_bad_address (rtx op)
33341 fatal_insn ("bad address", op);
33344 #if TARGET_MACHO
33346 typedef struct branch_island_d {
33347 tree function_name;
33348 tree label_name;
33349 int line_number;
33350 } branch_island;
33353 static vec<branch_island, va_gc> *branch_islands;
33355 /* Remember to generate a branch island for far calls to the given
33356 function. */
33358 static void
33359 add_compiler_branch_island (tree label_name, tree function_name,
33360 int line_number)
33362 branch_island bi = {function_name, label_name, line_number};
33363 vec_safe_push (branch_islands, bi);
33366 /* Generate far-jump branch islands for everything recorded in
33367 branch_islands. Invoked immediately after the last instruction of
33368 the epilogue has been emitted; the branch islands must be appended
33369 to, and contiguous with, the function body. Mach-O stubs are
33370 generated in machopic_output_stub(). */
33372 static void
33373 macho_branch_islands (void)
33375 char tmp_buf[512];
33377 while (!vec_safe_is_empty (branch_islands))
33379 branch_island *bi = &branch_islands->last ();
33380 const char *label = IDENTIFIER_POINTER (bi->label_name);
33381 const char *name = IDENTIFIER_POINTER (bi->function_name);
33382 char name_buf[512];
33383 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33384 if (name[0] == '*' || name[0] == '&')
33385 strcpy (name_buf, name+1);
33386 else
33388 name_buf[0] = '_';
33389 strcpy (name_buf+1, name);
33391 strcpy (tmp_buf, "\n");
33392 strcat (tmp_buf, label);
33393 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33394 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33395 dbxout_stabd (N_SLINE, bi->line_number);
33396 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33397 if (flag_pic)
33399 if (TARGET_LINK_STACK)
33401 char name[32];
33402 get_ppc476_thunk_name (name);
33403 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33404 strcat (tmp_buf, name);
33405 strcat (tmp_buf, "\n");
33406 strcat (tmp_buf, label);
33407 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33409 else
33411 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33412 strcat (tmp_buf, label);
33413 strcat (tmp_buf, "_pic\n");
33414 strcat (tmp_buf, label);
33415 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33418 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33419 strcat (tmp_buf, name_buf);
33420 strcat (tmp_buf, " - ");
33421 strcat (tmp_buf, label);
33422 strcat (tmp_buf, "_pic)\n");
33424 strcat (tmp_buf, "\tmtlr r0\n");
33426 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33427 strcat (tmp_buf, name_buf);
33428 strcat (tmp_buf, " - ");
33429 strcat (tmp_buf, label);
33430 strcat (tmp_buf, "_pic)\n");
33432 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33434 else
33436 strcat (tmp_buf, ":\nlis r12,hi16(");
33437 strcat (tmp_buf, name_buf);
33438 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33439 strcat (tmp_buf, name_buf);
33440 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33442 output_asm_insn (tmp_buf, 0);
33443 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33444 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33445 dbxout_stabd (N_SLINE, bi->line_number);
33446 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33447 branch_islands->pop ();
33451 /* NO_PREVIOUS_DEF checks the list of branch islands for whether the
33452 function name has already been recorded. */
33454 static int
33455 no_previous_def (tree function_name)
33457 branch_island *bi;
33458 unsigned ix;
33460 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33461 if (function_name == bi->function_name)
33462 return 0;
33463 return 1;
33466 /* GET_PREV_LABEL gets the label name from the previous definition of
33467 the function. */
33469 static tree
33470 get_prev_label (tree function_name)
33472 branch_island *bi;
33473 unsigned ix;
33475 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33476 if (function_name == bi->function_name)
33477 return bi->label_name;
33478 return NULL_TREE;
33481 /* INSN is either a function call or a millicode call. It may have an
33482 unconditional jump in its delay slot.
33484 CALL_DEST is the routine we are calling. */
33486 char *
33487 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33488 int cookie_operand_number)
33490 static char buf[256];
33491 if (darwin_emit_branch_islands
33492 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33493 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33495 tree labelname;
33496 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33498 if (no_previous_def (funname))
33500 rtx label_rtx = gen_label_rtx ();
33501 char *label_buf, temp_buf[256];
33502 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33503 CODE_LABEL_NUMBER (label_rtx));
33504 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33505 labelname = get_identifier (label_buf);
33506 add_compiler_branch_island (labelname, funname, insn_line (insn));
33508 else
33509 labelname = get_prev_label (funname);
33511 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33512 instruction will reach 'foo', otherwise link as 'bl L42'".
33513 "L42" should be a 'branch island', that will do a far jump to
33514 'foo'. Branch islands are generated in
33515 macho_branch_islands(). */
33516 sprintf (buf, "jbsr %%z%d,%.246s",
33517 dest_operand_number, IDENTIFIER_POINTER (labelname));
33519 else
33520 sprintf (buf, "bl %%z%d", dest_operand_number);
33521 return buf;
33524 /* Generate PIC and indirect symbol stubs. */
33526 void
33527 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33529 unsigned int length;
33530 char *symbol_name, *lazy_ptr_name;
33531 char *local_label_0;
33532 static int label = 0;
33534 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33535 symb = (*targetm.strip_name_encoding) (symb);
33538 length = strlen (symb);
33539 symbol_name = XALLOCAVEC (char, length + 32);
33540 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33542 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33543 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33545 if (flag_pic == 2)
33546 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33547 else
33548 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33550 if (flag_pic == 2)
33552 fprintf (file, "\t.align 5\n");
33554 fprintf (file, "%s:\n", stub);
33555 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33557 label++;
33558 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33559 sprintf (local_label_0, "\"L%011d$spb\"", label);
33561 fprintf (file, "\tmflr r0\n");
33562 if (TARGET_LINK_STACK)
33564 char name[32];
33565 get_ppc476_thunk_name (name);
33566 fprintf (file, "\tbl %s\n", name);
33567 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33569 else
33571 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33572 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33574 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33575 lazy_ptr_name, local_label_0);
33576 fprintf (file, "\tmtlr r0\n");
33577 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33578 (TARGET_64BIT ? "ldu" : "lwzu"),
33579 lazy_ptr_name, local_label_0);
33580 fprintf (file, "\tmtctr r12\n");
33581 fprintf (file, "\tbctr\n");
33583 else
33585 fprintf (file, "\t.align 4\n");
33587 fprintf (file, "%s:\n", stub);
33588 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33590 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33591 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33592 (TARGET_64BIT ? "ldu" : "lwzu"),
33593 lazy_ptr_name);
33594 fprintf (file, "\tmtctr r12\n");
33595 fprintf (file, "\tbctr\n");
33598 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33599 fprintf (file, "%s:\n", lazy_ptr_name);
33600 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33601 fprintf (file, "%sdyld_stub_binding_helper\n",
33602 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33605 /* Legitimize PIC addresses. If the address is already
33606 position-independent, we return ORIG. Newly generated
33607 position-independent addresses go into a reg. This is REG if
33608 nonzero; otherwise we allocate register(s) as necessary. */
33610 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33612 rtx
33613 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33614 rtx reg)
33616 rtx base, offset;
33618 if (reg == NULL && !reload_completed)
33619 reg = gen_reg_rtx (Pmode);
33621 if (GET_CODE (orig) == CONST)
33623 rtx reg_temp;
33625 if (GET_CODE (XEXP (orig, 0)) == PLUS
33626 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33627 return orig;
33629 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33631 /* Use a different reg for the intermediate value, as
33632 it will be marked UNCHANGING. */
33633 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33634 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33635 Pmode, reg_temp);
33636 offset =
33637 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33638 Pmode, reg);
33640 if (GET_CODE (offset) == CONST_INT)
33642 if (SMALL_INT (offset))
33643 return plus_constant (Pmode, base, INTVAL (offset));
33644 else if (!reload_completed)
33645 offset = force_reg (Pmode, offset);
33646 else
33648 rtx mem = force_const_mem (Pmode, orig);
33649 return machopic_legitimize_pic_address (mem, Pmode, reg);
33652 return gen_rtx_PLUS (Pmode, base, offset);
33655 /* Fall back on generic machopic code. */
33656 return machopic_legitimize_pic_address (orig, mode, reg);
33659 /* Output a .machine directive for the Darwin assembler, and call
33660 the generic start_file routine. */
33662 static void
33663 rs6000_darwin_file_start (void)
33665 static const struct
33667 const char *arg;
33668 const char *name;
33669 HOST_WIDE_INT if_set;
33670 } mapping[] = {
33671 { "ppc64", "ppc64", MASK_64BIT },
33672 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33673 { "power4", "ppc970", 0 },
33674 { "G5", "ppc970", 0 },
33675 { "7450", "ppc7450", 0 },
33676 { "7400", "ppc7400", MASK_ALTIVEC },
33677 { "G4", "ppc7400", 0 },
33678 { "750", "ppc750", 0 },
33679 { "740", "ppc750", 0 },
33680 { "G3", "ppc750", 0 },
33681 { "604e", "ppc604e", 0 },
33682 { "604", "ppc604", 0 },
33683 { "603e", "ppc603", 0 },
33684 { "603", "ppc603", 0 },
33685 { "601", "ppc601", 0 },
33686 { NULL, "ppc", 0 } };
33687 const char *cpu_id = "";
33688 size_t i;
33690 rs6000_file_start ();
33691 darwin_file_start ();
33693 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33695 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33696 cpu_id = rs6000_default_cpu;
33698 if (global_options_set.x_rs6000_cpu_index)
33699 cpu_id = processor_target_table[rs6000_cpu_index].name;
33701 /* Look through the mapping array. Pick the first name that either
33702 matches the argument, has a bit set in IF_SET that is also set
33703 in the target flags, or has a NULL name. */
33705 i = 0;
33706 while (mapping[i].arg != NULL
33707 && strcmp (mapping[i].arg, cpu_id) != 0
33708 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33709 i++;
33711 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33714 #endif /* TARGET_MACHO */
33716 #if TARGET_ELF
33717 static int
33718 rs6000_elf_reloc_rw_mask (void)
33720 if (flag_pic)
33721 return 3;
33722 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33723 return 2;
33724 else
33725 return 0;
33728 /* Record an element in the table of global constructors. SYMBOL is
33729 a SYMBOL_REF of the function to be called; PRIORITY is a number
33730 between 0 and MAX_INIT_PRIORITY.
33732 This differs from default_named_section_asm_out_constructor in
33733 that we have special handling for -mrelocatable. */
33735 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33736 static void
33737 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33739 const char *section = ".ctors";
33740 char buf[18];
33742 if (priority != DEFAULT_INIT_PRIORITY)
33744 sprintf (buf, ".ctors.%.5u",
33745 /* Invert the numbering so the linker puts us in the proper
33746 order; constructors are run from right to left, and the
33747 linker sorts in increasing order. */
33748 MAX_INIT_PRIORITY - priority);
33749 section = buf;
33752 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33753 assemble_align (POINTER_SIZE);
33755 if (DEFAULT_ABI == ABI_V4
33756 && (TARGET_RELOCATABLE || flag_pic > 1))
33758 fputs ("\t.long (", asm_out_file);
33759 output_addr_const (asm_out_file, symbol);
33760 fputs (")@fixup\n", asm_out_file);
33762 else
33763 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33766 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33767 static void
33768 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33770 const char *section = ".dtors";
33771 char buf[18];
33773 if (priority != DEFAULT_INIT_PRIORITY)
33775 sprintf (buf, ".dtors.%.5u",
33776 /* Invert the numbering so the linker puts us in the proper
33777 order; constructors are run from right to left, and the
33778 linker sorts in increasing order. */
33779 MAX_INIT_PRIORITY - priority);
33780 section = buf;
33783 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33784 assemble_align (POINTER_SIZE);
33786 if (DEFAULT_ABI == ABI_V4
33787 && (TARGET_RELOCATABLE || flag_pic > 1))
33789 fputs ("\t.long (", asm_out_file);
33790 output_addr_const (asm_out_file, symbol);
33791 fputs (")@fixup\n", asm_out_file);
33793 else
33794 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
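/* Illustrative sketch (not part of rs6000.c): the priority inversion used
   by both functions above.  With MAX_INIT_PRIORITY == 65535, priority 101
   maps to ".ctors.65434"; the linker's increasing section sort then
   reproduces the right-to-left execution order of constructors.  BUF is
   assumed to hold at least 18 bytes, as above.  */
static void
ctor_section_name_example (char *buf)
{
  sprintf (buf, ".ctors.%.5u", 65535 - 101);	/* -> ".ctors.65434" */
}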
33797 void
33798 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33800 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33802 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33803 ASM_OUTPUT_LABEL (file, name);
33804 fputs (DOUBLE_INT_ASM_OP, file);
33805 rs6000_output_function_entry (file, name);
33806 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33807 if (DOT_SYMBOLS)
33809 fputs ("\t.size\t", file);
33810 assemble_name (file, name);
33811 fputs (",24\n\t.type\t.", file);
33812 assemble_name (file, name);
33813 fputs (",@function\n", file);
33814 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33816 fputs ("\t.globl\t.", file);
33817 assemble_name (file, name);
33818 putc ('\n', file);
33821 else
33822 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33823 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33824 rs6000_output_function_entry (file, name);
33825 fputs (":\n", file);
33826 return;
33829 int uses_toc;
33830 if (DEFAULT_ABI == ABI_V4
33831 && (TARGET_RELOCATABLE || flag_pic > 1)
33832 && !TARGET_SECURE_PLT
33833 && (!constant_pool_empty_p () || crtl->profile)
33834 && (uses_toc = uses_TOC ()))
33836 char buf[256];
33838 if (uses_toc == 2)
33839 switch_to_other_text_partition ();
33840 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33842 fprintf (file, "\t.long ");
33843 assemble_name (file, toc_label_name);
33844 need_toc_init = 1;
33845 putc ('-', file);
33846 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33847 assemble_name (file, buf);
33848 putc ('\n', file);
33849 if (uses_toc == 2)
33850 switch_to_other_text_partition ();
33853 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33854 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33856 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33858 char buf[256];
33860 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33862 fprintf (file, "\t.quad .TOC.-");
33863 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33864 assemble_name (file, buf);
33865 putc ('\n', file);
33868 if (DEFAULT_ABI == ABI_AIX)
33870 const char *desc_name, *orig_name;
33872 orig_name = (*targetm.strip_name_encoding) (name);
33873 desc_name = orig_name;
33874 while (*desc_name == '.')
33875 desc_name++;
33877 if (TREE_PUBLIC (decl))
33878 fprintf (file, "\t.globl %s\n", desc_name);
33880 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33881 fprintf (file, "%s:\n", desc_name);
33882 fprintf (file, "\t.long %s\n", orig_name);
33883 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33884 fputs ("\t.long 0\n", file);
33885 fprintf (file, "\t.previous\n");
33887 ASM_OUTPUT_LABEL (file, name);
33890 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33891 static void
33892 rs6000_elf_file_end (void)
33894 #ifdef HAVE_AS_GNU_ATTRIBUTE
33895 /* ??? The value emitted depends on options active at file end.
33896 Assume anyone using #pragma or attributes that might change
33897 options knows what they are doing. */
33898 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33899 && rs6000_passes_float)
33901 int fp;
33903 if (TARGET_DF_FPR)
33904 fp = 1;
33905 else if (TARGET_SF_FPR)
33906 fp = 3;
33907 else
33908 fp = 2;
33909 if (rs6000_passes_long_double)
33911 if (!TARGET_LONG_DOUBLE_128)
33912 fp |= 2 * 4;
33913 else if (TARGET_IEEEQUAD)
33914 fp |= 3 * 4;
33915 else
33916 fp |= 1 * 4;
33918 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33920 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33922 if (rs6000_passes_vector)
33923 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33924 (TARGET_ALTIVEC_ABI ? 2 : 1));
33925 if (rs6000_returns_struct)
33926 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33927 aix_struct_return ? 2 : 1);
33929 #endif
33930 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33931 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33932 file_end_indicate_exec_stack ();
33933 #endif
33935 if (flag_split_stack)
33936 file_end_indicate_split_stack ();
33938 if (cpu_builtin_p)
33940 /* We have expanded a CPU builtin, so we need to emit a reference to
33941 the special symbol that LIBC uses to declare that it supports the
33942 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
33943 switch_to_section (data_section);
33944 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33945 fprintf (asm_out_file, "\t%s %s\n",
33946 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33949 #endif
33951 #if TARGET_XCOFF
33953 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33954 #define HAVE_XCOFF_DWARF_EXTRAS 0
33955 #endif
33957 static enum unwind_info_type
33958 rs6000_xcoff_debug_unwind_info (void)
33960 return UI_NONE;
33963 static void
33964 rs6000_xcoff_asm_output_anchor (rtx symbol)
33966 char buffer[100];
33968 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33969 SYMBOL_REF_BLOCK_OFFSET (symbol));
33970 fprintf (asm_out_file, "%s", SET_ASM_OP);
33971 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33972 fprintf (asm_out_file, ",");
33973 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33974 fprintf (asm_out_file, "\n");
33977 static void
33978 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33980 fputs (GLOBAL_ASM_OP, stream);
33981 RS6000_OUTPUT_BASENAME (stream, name);
33982 putc ('\n', stream);
33985 /* A get_unnamed_decl callback, used for read-only sections. PTR
33986 points to the section string variable. */
33988 static void
33989 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33991 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33992 *(const char *const *) directive,
33993 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33996 /* Likewise for read-write sections. */
33998 static void
33999 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
34001 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
34002 *(const char *const *) directive,
34003 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
34006 static void
34007 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
34009 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
34010 *(const char *const *) directive,
34011 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
34014 /* A get_unnamed_section callback, used for switching to toc_section. */
34016 static void
34017 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
34019 if (TARGET_MINIMAL_TOC)
34021 /* toc_section is always selected at least once from
34022 rs6000_xcoff_file_start, so this is guaranteed to be
34023 defined exactly once in each file. */
34024 if (!toc_initialized)
34026 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
34027 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
34028 toc_initialized = 1;
34030 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
34031 (TARGET_32BIT ? "" : ",3"));
34033 else
34034 fputs ("\t.toc\n", asm_out_file);
34037 /* Implement TARGET_ASM_INIT_SECTIONS. */
34039 static void
34040 rs6000_xcoff_asm_init_sections (void)
34042 read_only_data_section
34043 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
34044 &xcoff_read_only_section_name);
34046 private_data_section
34047 = get_unnamed_section (SECTION_WRITE,
34048 rs6000_xcoff_output_readwrite_section_asm_op,
34049 &xcoff_private_data_section_name);
34051 tls_data_section
34052 = get_unnamed_section (SECTION_TLS,
34053 rs6000_xcoff_output_tls_section_asm_op,
34054 &xcoff_tls_data_section_name);
34056 tls_private_data_section
34057 = get_unnamed_section (SECTION_TLS,
34058 rs6000_xcoff_output_tls_section_asm_op,
34059 &xcoff_private_data_section_name);
34061 read_only_private_data_section
34062 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
34063 &xcoff_private_data_section_name);
34065 toc_section
34066 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
34068 readonly_data_section = read_only_data_section;
34071 static int
34072 rs6000_xcoff_reloc_rw_mask (void)
34074 return 3;
34077 static void
34078 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
34079 tree decl ATTRIBUTE_UNUSED)
34081 int smclass;
34082 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
34084 if (flags & SECTION_EXCLUDE)
34085 smclass = 4;
34086 else if (flags & SECTION_DEBUG)
34088 fprintf (asm_out_file, "\t.dwsect %s\n", name);
34089 return;
34091 else if (flags & SECTION_CODE)
34092 smclass = 0;
34093 else if (flags & SECTION_TLS)
34094 smclass = 3;
34095 else if (flags & SECTION_WRITE)
34096 smclass = 2;
34097 else
34098 smclass = 1;
34100 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
34101 (flags & SECTION_CODE) ? "." : "",
34102 name, suffix[smclass], flags & SECTION_ENTSIZE);
34105 #define IN_NAMED_SECTION(DECL) \
34106 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
34107 && DECL_SECTION_NAME (DECL) != NULL)
34109 static section *
34110 rs6000_xcoff_select_section (tree decl, int reloc,
34111 unsigned HOST_WIDE_INT align)
34113 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into a
34114 named section. */
34115 if (align > BIGGEST_ALIGNMENT)
34117 resolve_unique_section (decl, reloc, true);
34118 if (IN_NAMED_SECTION (decl))
34119 return get_named_section (decl, NULL, reloc);
34122 if (decl_readonly_section (decl, reloc))
34124 if (TREE_PUBLIC (decl))
34125 return read_only_data_section;
34126 else
34127 return read_only_private_data_section;
34129 else
34131 #if HAVE_AS_TLS
34132 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34134 if (TREE_PUBLIC (decl))
34135 return tls_data_section;
34136 else if (bss_initializer_p (decl))
34138 /* Convert to COMMON to emit in BSS. */
34139 DECL_COMMON (decl) = 1;
34140 return tls_comm_section;
34142 else
34143 return tls_private_data_section;
34145 else
34146 #endif
34147 if (TREE_PUBLIC (decl))
34148 return data_section;
34149 else
34150 return private_data_section;
34154 static void
34155 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
34157 const char *name;
34159 /* Use select_section for private data and uninitialized data with
34160 alignment <= BIGGEST_ALIGNMENT. */
34161 if (!TREE_PUBLIC (decl)
34162 || DECL_COMMON (decl)
34163 || (DECL_INITIAL (decl) == NULL_TREE
34164 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
34165 || DECL_INITIAL (decl) == error_mark_node
34166 || (flag_zero_initialized_in_bss
34167 && initializer_zerop (DECL_INITIAL (decl))))
34168 return;
34170 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
34171 name = (*targetm.strip_name_encoding) (name);
34172 set_decl_section_name (decl, name);
34175 /* Select section for constant in constant pool.
34177 On RS/6000, all constants are in the private read-only data area.
34178 However, if this is being placed in the TOC it must be output as a
34179 toc entry. */
34181 static section *
34182 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34183 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34185 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34186 return toc_section;
34187 else
34188 return read_only_private_data_section;
34191 /* Remove any trailing [DS] or the like from the symbol name. */
34193 static const char *
34194 rs6000_xcoff_strip_name_encoding (const char *name)
34196 size_t len;
34197 if (*name == '*')
34198 name++;
34199 len = strlen (name);
34200 if (name[len - 1] == ']')
34201 return ggc_alloc_string (name, len - 4);
34202 else
34203 return name;
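/* Illustrative sketch (not part of rs6000.c): the stripping above drops a
   leading '*' and a trailing 4-character "[XX]" mapping class.  */
static void
strip_name_encoding_examples (void)
{
  /* "foo[DS]" -> "foo": the bracketed suffix is removed.  */
  gcc_assert (!strcmp (rs6000_xcoff_strip_name_encoding ("foo[DS]"), "foo"));
  /* "*bar" -> "bar": only the leading '*' is skipped.  */
  gcc_assert (!strcmp (rs6000_xcoff_strip_name_encoding ("*bar"), "bar"));
}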
34206 /* Section attributes. AIX is always PIC. */
34208 static unsigned int
34209 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34211 unsigned int align;
34212 unsigned int flags = default_section_type_flags (decl, name, reloc);
34214 /* Align to at least UNIT size. */
34215 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34216 align = MIN_UNITS_PER_WORD;
34217 else
34218 /* Increase alignment of large objects if not already stricter. */
34219 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34220 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34221 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34223 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
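/* Illustrative sketch (not part of rs6000.c): the flag encoding used
   above.  The csect alignment travels as its log2 in the SECTION_ENTSIZE
   bits of the section flags, so an 8-byte alignment contributes 3.  */
static unsigned int
encode_csect_align_example (unsigned int align_bytes, unsigned int flags)
{
  return flags | (exact_log2 (align_bytes) & SECTION_ENTSIZE);
}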
34226 /* Output at beginning of assembler file.
34228 Initialize the section names for the RS/6000 at this point.
34230 Specify filename, including full path, to assembler.
34232 We want to go into the TOC section so at least one .toc will be emitted.
34233 Also, in order to output proper .bs/.es pairs, we need at least one static
34234 [RW] section emitted.
34236 Finally, declare mcount when profiling to make the assembler happy. */
34238 static void
34239 rs6000_xcoff_file_start (void)
34241 rs6000_gen_section_name (&xcoff_bss_section_name,
34242 main_input_filename, ".bss_");
34243 rs6000_gen_section_name (&xcoff_private_data_section_name,
34244 main_input_filename, ".rw_");
34245 rs6000_gen_section_name (&xcoff_read_only_section_name,
34246 main_input_filename, ".ro_");
34247 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34248 main_input_filename, ".tls_");
34249 rs6000_gen_section_name (&xcoff_tbss_section_name,
34250 main_input_filename, ".tbss_[UL]");
34252 fputs ("\t.file\t", asm_out_file);
34253 output_quoted_string (asm_out_file, main_input_filename);
34254 fputc ('\n', asm_out_file);
34255 if (write_symbols != NO_DEBUG)
34256 switch_to_section (private_data_section);
34257 switch_to_section (toc_section);
34258 switch_to_section (text_section);
34259 if (profile_flag)
34260 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34261 rs6000_file_start ();
34264 /* Output at end of assembler file.
34265 On the RS/6000, referencing data should automatically pull in text. */
34267 static void
34268 rs6000_xcoff_file_end (void)
34270 switch_to_section (text_section);
34271 fputs ("_section_.text:\n", asm_out_file);
34272 switch_to_section (data_section);
34273 fputs (TARGET_32BIT
34274 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34275 asm_out_file);
34278 struct declare_alias_data
34280 FILE *file;
34281 bool function_descriptor;
34284 /* Declare alias N. A callback for call_for_symbol_and_aliases. */
34286 static bool
34287 rs6000_declare_alias (struct symtab_node *n, void *d)
34289 struct declare_alias_data *data = (struct declare_alias_data *)d;
34290 /* Main symbol is output specially, because varasm machinery does part of
34291 the job for us - we do not need to declare .globl/lglobs and such. */
34292 if (!n->alias || n->weakref)
34293 return false;
34295 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34296 return false;
34298 /* Prevent assemble_alias from trying to use .set pseudo operation
34299 that does not behave as expected by the middle-end. */
34300 TREE_ASM_WRITTEN (n->decl) = true;
34302 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34303 char *buffer = (char *) alloca (strlen (name) + 2);
34304 char *p;
34305 int dollar_inside = 0;
34307 strcpy (buffer, name);
34308 p = strchr (buffer, '$');
34309 while (p) {
34310 *p = '_';
34311 dollar_inside++;
34312 p = strchr (p + 1, '$');
34314 if (TREE_PUBLIC (n->decl))
34316 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34318 if (dollar_inside) {
34319 if (data->function_descriptor)
34320 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34321 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34323 if (data->function_descriptor)
34325 fputs ("\t.globl .", data->file);
34326 RS6000_OUTPUT_BASENAME (data->file, buffer);
34327 putc ('\n', data->file);
34329 fputs ("\t.globl ", data->file);
34330 RS6000_OUTPUT_BASENAME (data->file, buffer);
34331 putc ('\n', data->file);
34333 #ifdef ASM_WEAKEN_DECL
34334 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34335 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34336 #endif
34338 else
34340 if (dollar_inside)
34342 if (data->function_descriptor)
34343 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34344 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34346 if (data->function_descriptor)
34348 fputs ("\t.lglobl .", data->file);
34349 RS6000_OUTPUT_BASENAME (data->file, buffer);
34350 putc ('\n', data->file);
34352 fputs ("\t.lglobl ", data->file);
34353 RS6000_OUTPUT_BASENAME (data->file, buffer);
34354 putc ('\n', data->file);
34356 if (data->function_descriptor)
34357 fputs (".", data->file);
34358 RS6000_OUTPUT_BASENAME (data->file, buffer);
34359 fputs (":\n", data->file);
34360 return false;
34364 #ifdef HAVE_GAS_HIDDEN
34365 /* Helper function to calculate visibility of a DECL
34366 and return the value as a const string. */
34368 static const char *
34369 rs6000_xcoff_visibility (tree decl)
34371 static const char * const visibility_types[] = {
34372 "", ",protected", ",hidden", ",internal"
34375 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34377 if (TREE_CODE (decl) == FUNCTION_DECL
34378 && cgraph_node::get (decl)
34379 && cgraph_node::get (decl)->instrumentation_clone
34380 && cgraph_node::get (decl)->instrumented_version)
34381 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
34383 return visibility_types[vis];
34385 #endif
34388 /* This macro produces the initial definition of a function name.
34389 On the RS/6000, we need to place an extra '.' in the function name and
34390 output the function descriptor.
34391 Dollar signs are converted to underscores.
34393 The csect for the function will have already been created when
34394 text_section was selected. We do have to go back to that csect, however.
34396 The third and fourth parameters to the .function pseudo-op (16 and 044)
34397 are placeholders which no longer have any use.
34399 Because AIX assembler's .set command has unexpected semantics, we output
34400 all aliases as alternative labels in front of the definition. */
34402 void
34403 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34405 char *buffer = (char *) alloca (strlen (name) + 1);
34406 char *p;
34407 int dollar_inside = 0;
34408 struct declare_alias_data data = {file, false};
34410 strcpy (buffer, name);
34411 p = strchr (buffer, '$');
34412 while (p) {
34413 *p = '_';
34414 dollar_inside++;
34415 p = strchr (p + 1, '$');
34417 if (TREE_PUBLIC (decl))
34419 if (!RS6000_WEAK || !DECL_WEAK (decl))
34421 if (dollar_inside) {
34422 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34423 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34425 fputs ("\t.globl .", file);
34426 RS6000_OUTPUT_BASENAME (file, buffer);
34427 #ifdef HAVE_GAS_HIDDEN
34428 fputs (rs6000_xcoff_visibility (decl), file);
34429 #endif
34430 putc ('\n', file);
34433 else
34435 if (dollar_inside) {
34436 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34437 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34439 fputs ("\t.lglobl .", file);
34440 RS6000_OUTPUT_BASENAME (file, buffer);
34441 putc ('\n', file);
34443 fputs ("\t.csect ", file);
34444 RS6000_OUTPUT_BASENAME (file, buffer);
34445 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34446 RS6000_OUTPUT_BASENAME (file, buffer);
34447 fputs (":\n", file);
34448 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34449 &data, true);
34450 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34451 RS6000_OUTPUT_BASENAME (file, buffer);
34452 fputs (", TOC[tc0], 0\n", file);
34453 in_section = NULL;
34454 switch_to_section (function_section (decl));
34455 putc ('.', file);
34456 RS6000_OUTPUT_BASENAME (file, buffer);
34457 fputs (":\n", file);
34458 data.function_descriptor = true;
34459 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34460 &data, true);
34461 if (!DECL_IGNORED_P (decl))
34463 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34464 xcoffout_declare_function (file, decl, buffer);
34465 else if (write_symbols == DWARF2_DEBUG)
34467 name = (*targetm.strip_name_encoding) (name);
34468 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34471 return;
34475 /* Output assembly language to globalize a symbol from a DECL,
34476 possibly with visibility. */
34478 void
34479 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34481 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34482 fputs (GLOBAL_ASM_OP, stream);
34483 RS6000_OUTPUT_BASENAME (stream, name);
34484 #ifdef HAVE_GAS_HIDDEN
34485 fputs (rs6000_xcoff_visibility (decl), stream);
34486 #endif
34487 putc ('\n', stream);
34490 /* Output assembly language to define a symbol as COMMON from a DECL,
34491 possibly with visibility. */
34493 void
34494 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34495 tree decl ATTRIBUTE_UNUSED,
34496 const char *name,
34497 unsigned HOST_WIDE_INT size,
34498 unsigned HOST_WIDE_INT align)
34500 unsigned HOST_WIDE_INT align2 = 2;
34502 if (align > 32)
34503 align2 = floor_log2 (align / BITS_PER_UNIT);
34504 else if (size > 4)
34505 align2 = 3;
34507 fputs (COMMON_ASM_OP, stream);
34508 RS6000_OUTPUT_BASENAME (stream, name);
34510 fprintf (stream,
34511 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34512 size, align2);
34514 #ifdef HAVE_GAS_HIDDEN
34515 if (decl != NULL)
34516 fputs (rs6000_xcoff_visibility (decl), stream);
34517 #endif
34518 putc ('\n', stream);
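/* Illustrative sketch (not part of rs6000.c): the log2 alignment rule used
   above for AIX .comm, isolated.  The default is 4-byte alignment (2);
   objects wider than a word get 8 bytes (3); anything with a stricter
   declared alignment keeps its own log2.  */
static unsigned HOST_WIDE_INT
xcoff_comm_align2_example (unsigned HOST_WIDE_INT size,
			   unsigned HOST_WIDE_INT align_bits)
{
  unsigned HOST_WIDE_INT align2 = 2;
  if (align_bits > 32)
    align2 = floor_log2 (align_bits / BITS_PER_UNIT);
  else if (size > 4)
    align2 = 3;
  return align2;
}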
34521 /* This macro produces the initial definition of an object (variable) name.
34522 Because AIX assembler's .set command has unexpected semantics, we output
34523 all aliases as alternative labels in front of the definition. */
34525 void
34526 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34528 struct declare_alias_data data = {file, false};
34529 RS6000_OUTPUT_BASENAME (file, name);
34530 fputs (":\n", file);
34531 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34532 &data, true);
34535 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34537 void
34538 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34540 fputs (integer_asm_op (size, FALSE), file);
34541 assemble_name (file, label);
34542 fputs ("-$", file);
34545 /* Output a symbol offset relative to the dbase for the current object.
34546 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34547 signed offsets.
34549 __gcc_unwind_dbase is embedded in all executables/libraries through
34550 libgcc/config/rs6000/crtdbase.S. */
34552 void
34553 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34555 fputs (integer_asm_op (size, FALSE), file);
34556 assemble_name (file, label);
34557 fputs("-__gcc_unwind_dbase", file);
34560 #ifdef HAVE_AS_TLS
34561 static void
34562 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34564 rtx symbol;
34565 int flags;
34566 const char *symname;
34568 default_encode_section_info (decl, rtl, first);
34570 /* Careful not to prod global register variables. */
34571 if (!MEM_P (rtl))
34572 return;
34573 symbol = XEXP (rtl, 0);
34574 if (GET_CODE (symbol) != SYMBOL_REF)
34575 return;
34577 flags = SYMBOL_REF_FLAGS (symbol);
34579 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34580 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34582 SYMBOL_REF_FLAGS (symbol) = flags;
34584 /* Append mapping class to extern decls. */
34585 symname = XSTR (symbol, 0);
34586 if (decl /* sync condition with assemble_external () */
34587 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34588 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34589 || TREE_CODE (decl) == FUNCTION_DECL)
34590 && symname[strlen (symname) - 1] != ']')
34592 char *newname = (char *) alloca (strlen (symname) + 5);
34593 strcpy (newname, symname);
34594 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34595 ? "[DS]" : "[UA]"));
34596 XSTR (symbol, 0) = ggc_strdup (newname);
34599 #endif /* HAVE_AS_TLS */
34600 #endif /* TARGET_XCOFF */
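/* Illustrative sketch, not part of this file: the mapping-class logic
   above, mimicked with plain C string handling.  It assumes the same
   convention as the code: functions get the "[DS]" descriptor class,
   other data gets "[UA]", and names already ending in ']' are left
   alone.  `is_function' stands in for the FUNCTION_DECL check.  */

#include <stdlib.h>
#include <string.h>

static char *
append_mapping_class (const char *symname, int is_function)
{
  size_t len = strlen (symname);

  /* Skip names that already carry a mapping class, as the test for a
     trailing ']' does above.  */
  if (len > 0 && symname[len - 1] == ']')
    return strdup (symname);

  char *newname = malloc (len + 5);	/* strlen ("[DS]") + NUL */
  strcpy (newname, symname);
  strcat (newname, is_function ? "[DS]" : "[UA]");
  return newname;
}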
34602 void
34603 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34604 const char *name, const char *val)
34606 fputs ("\t.weak\t", stream);
34607 RS6000_OUTPUT_BASENAME (stream, name);
34608 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34609 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34611 if (TARGET_XCOFF)
34612 fputs ("[DS]", stream);
34613 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34614 if (TARGET_XCOFF)
34615 fputs (rs6000_xcoff_visibility (decl), stream);
34616 #endif
34617 fputs ("\n\t.weak\t.", stream);
34618 RS6000_OUTPUT_BASENAME (stream, name);
34620 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34621 if (TARGET_XCOFF)
34622 fputs (rs6000_xcoff_visibility (decl), stream);
34623 #endif
34624 fputc ('\n', stream);
34625 if (val)
34627 #ifdef ASM_OUTPUT_DEF
34628 ASM_OUTPUT_DEF (stream, name, val);
34629 #endif
34630 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34631 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34633 fputs ("\t.set\t.", stream);
34634 RS6000_OUTPUT_BASENAME (stream, name);
34635 fputs (",.", stream);
34636 RS6000_OUTPUT_BASENAME (stream, val);
34637 fputc ('\n', stream);
34643 /* Return true if INSN should not be copied. */
34645 static bool
34646 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34648 return recog_memoized (insn) >= 0
34649 && get_attr_cannot_copy (insn);
34652 /* Compute a (partial) cost for rtx X. Return true if the complete
34653 cost has been computed, and false if subexpressions should be
34654 scanned. In either case, *TOTAL contains the cost result. */
34656 static bool
34657 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34658 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34660 int code = GET_CODE (x);
34662 switch (code)
34664 /* On the RS/6000, if it is valid in the insn, it is free. */
34665 case CONST_INT:
34666 if (((outer_code == SET
34667 || outer_code == PLUS
34668 || outer_code == MINUS)
34669 && (satisfies_constraint_I (x)
34670 || satisfies_constraint_L (x)))
34671 || (outer_code == AND
34672 && (satisfies_constraint_K (x)
34673 || (mode == SImode
34674 ? satisfies_constraint_L (x)
34675 : satisfies_constraint_J (x))))
34676 || ((outer_code == IOR || outer_code == XOR)
34677 && (satisfies_constraint_K (x)
34678 || (mode == SImode
34679 ? satisfies_constraint_L (x)
34680 : satisfies_constraint_J (x))))
34681 || outer_code == ASHIFT
34682 || outer_code == ASHIFTRT
34683 || outer_code == LSHIFTRT
34684 || outer_code == ROTATE
34685 || outer_code == ROTATERT
34686 || outer_code == ZERO_EXTRACT
34687 || (outer_code == MULT
34688 && satisfies_constraint_I (x))
34689 || ((outer_code == DIV || outer_code == UDIV
34690 || outer_code == MOD || outer_code == UMOD)
34691 && exact_log2 (INTVAL (x)) >= 0)
34692 || (outer_code == COMPARE
34693 && (satisfies_constraint_I (x)
34694 || satisfies_constraint_K (x)))
34695 || ((outer_code == EQ || outer_code == NE)
34696 && (satisfies_constraint_I (x)
34697 || satisfies_constraint_K (x)
34698 || (mode == SImode
34699 ? satisfies_constraint_L (x)
34700 : satisfies_constraint_J (x))))
34701 || (outer_code == GTU
34702 && satisfies_constraint_I (x))
34703 || (outer_code == LTU
34704 && satisfies_constraint_P (x)))
34706 *total = 0;
34707 return true;
34709 else if ((outer_code == PLUS
34710 && reg_or_add_cint_operand (x, VOIDmode))
34711 || (outer_code == MINUS
34712 && reg_or_sub_cint_operand (x, VOIDmode))
34713 || ((outer_code == SET
34714 || outer_code == IOR
34715 || outer_code == XOR)
34716 && (INTVAL (x)
34717 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34719 *total = COSTS_N_INSNS (1);
34720 return true;
34722 /* FALLTHRU */
34724 case CONST_DOUBLE:
34725 case CONST_WIDE_INT:
34726 case CONST:
34727 case HIGH:
34728 case SYMBOL_REF:
34729 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34730 return true;
34732 case MEM:
34733 /* When optimizing for size, MEM should be slightly more expensive
34734        than generating the address, e.g., (plus (reg) (const)).
34735 L1 cache latency is about two instructions. */
34736 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34737 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34738 *total += COSTS_N_INSNS (100);
34739 return true;
34741 case LABEL_REF:
34742 *total = 0;
34743 return true;
34745 case PLUS:
34746 case MINUS:
34747 if (FLOAT_MODE_P (mode))
34748 *total = rs6000_cost->fp;
34749 else
34750 *total = COSTS_N_INSNS (1);
34751 return false;
34753 case MULT:
34754 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34755 && satisfies_constraint_I (XEXP (x, 1)))
34757 if (INTVAL (XEXP (x, 1)) >= -256
34758 && INTVAL (XEXP (x, 1)) <= 255)
34759 *total = rs6000_cost->mulsi_const9;
34760 else
34761 *total = rs6000_cost->mulsi_const;
34763 else if (mode == SFmode)
34764 *total = rs6000_cost->fp;
34765 else if (FLOAT_MODE_P (mode))
34766 *total = rs6000_cost->dmul;
34767 else if (mode == DImode)
34768 *total = rs6000_cost->muldi;
34769 else
34770 *total = rs6000_cost->mulsi;
34771 return false;
34773 case FMA:
34774 if (mode == SFmode)
34775 *total = rs6000_cost->fp;
34776 else
34777 *total = rs6000_cost->dmul;
34778 break;
34780 case DIV:
34781 case MOD:
34782 if (FLOAT_MODE_P (mode))
34784 *total = mode == DFmode ? rs6000_cost->ddiv
34785 : rs6000_cost->sdiv;
34786 return false;
34788 /* FALLTHRU */
34790 case UDIV:
34791 case UMOD:
34792 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34793 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34795 if (code == DIV || code == MOD)
34796 /* Shift, addze */
34797 *total = COSTS_N_INSNS (2);
34798 else
34799 /* Shift */
34800 *total = COSTS_N_INSNS (1);
34802 else
34804 if (GET_MODE (XEXP (x, 1)) == DImode)
34805 *total = rs6000_cost->divdi;
34806 else
34807 *total = rs6000_cost->divsi;
34809 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34810 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34811 *total += COSTS_N_INSNS (2);
34812 return false;
34814 case CTZ:
34815 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34816 return false;
34818 case FFS:
34819 *total = COSTS_N_INSNS (4);
34820 return false;
34822 case POPCOUNT:
34823 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34824 return false;
34826 case PARITY:
34827 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34828 return false;
34830 case NOT:
34831 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34832 *total = 0;
34833 else
34834 *total = COSTS_N_INSNS (1);
34835 return false;
34837 case AND:
34838 if (CONST_INT_P (XEXP (x, 1)))
34840 rtx left = XEXP (x, 0);
34841 rtx_code left_code = GET_CODE (left);
34843 /* rotate-and-mask: 1 insn. */
34844 if ((left_code == ROTATE
34845 || left_code == ASHIFT
34846 || left_code == LSHIFTRT)
34847 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34849 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34850 if (!CONST_INT_P (XEXP (left, 1)))
34851 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34852 *total += COSTS_N_INSNS (1);
34853 return true;
34856 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34857 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34858 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34859 || (val & 0xffff) == val
34860 || (val & 0xffff0000) == val
34861 || ((val & 0xffff) == 0 && mode == SImode))
34863 *total = rtx_cost (left, mode, AND, 0, speed);
34864 *total += COSTS_N_INSNS (1);
34865 return true;
34868 /* 2 insns. */
34869 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34871 *total = rtx_cost (left, mode, AND, 0, speed);
34872 *total += COSTS_N_INSNS (2);
34873 return true;
34877 *total = COSTS_N_INSNS (1);
34878 return false;
34880 case IOR:
34881 /* FIXME */
34882 *total = COSTS_N_INSNS (1);
34883 return true;
34885 case CLZ:
34886 case XOR:
34887 case ZERO_EXTRACT:
34888 *total = COSTS_N_INSNS (1);
34889 return false;
34891 case ASHIFT:
34892 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34893 the sign extend and shift separately within the insn. */
34894 if (TARGET_EXTSWSLI && mode == DImode
34895 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34896 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34898 *total = 0;
34899 return false;
34901 /* fall through */
34903 case ASHIFTRT:
34904 case LSHIFTRT:
34905 case ROTATE:
34906 case ROTATERT:
34907 /* Handle mul_highpart. */
34908 if (outer_code == TRUNCATE
34909 && GET_CODE (XEXP (x, 0)) == MULT)
34911 if (mode == DImode)
34912 *total = rs6000_cost->muldi;
34913 else
34914 *total = rs6000_cost->mulsi;
34915 return true;
34917 else if (outer_code == AND)
34918 *total = 0;
34919 else
34920 *total = COSTS_N_INSNS (1);
34921 return false;
34923 case SIGN_EXTEND:
34924 case ZERO_EXTEND:
34925 if (GET_CODE (XEXP (x, 0)) == MEM)
34926 *total = 0;
34927 else
34928 *total = COSTS_N_INSNS (1);
34929 return false;
34931 case COMPARE:
34932 case NEG:
34933 case ABS:
34934 if (!FLOAT_MODE_P (mode))
34936 *total = COSTS_N_INSNS (1);
34937 return false;
34939 /* FALLTHRU */
34941 case FLOAT:
34942 case UNSIGNED_FLOAT:
34943 case FIX:
34944 case UNSIGNED_FIX:
34945 case FLOAT_TRUNCATE:
34946 *total = rs6000_cost->fp;
34947 return false;
34949 case FLOAT_EXTEND:
34950 if (mode == DFmode)
34951 *total = rs6000_cost->sfdf_convert;
34952 else
34953 *total = rs6000_cost->fp;
34954 return false;
34956 case UNSPEC:
34957 switch (XINT (x, 1))
34959 case UNSPEC_FRSP:
34960 *total = rs6000_cost->fp;
34961 return true;
34963 default:
34964 break;
34966 break;
34968 case CALL:
34969 case IF_THEN_ELSE:
34970 if (!speed)
34972 *total = COSTS_N_INSNS (1);
34973 return true;
34975 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34977 *total = rs6000_cost->fp;
34978 return false;
34980 break;
34982 case NE:
34983 case EQ:
34984 case GTU:
34985 case LTU:
34986 /* Carry bit requires mode == Pmode.
34987 NEG or PLUS already counted so only add one. */
34988 if (mode == Pmode
34989 && (outer_code == NEG || outer_code == PLUS))
34991 *total = COSTS_N_INSNS (1);
34992 return true;
34994 /* FALLTHRU */
34996 case GT:
34997 case LT:
34998 case UNORDERED:
34999 if (outer_code == SET)
35001 if (XEXP (x, 1) == const0_rtx)
35003 *total = COSTS_N_INSNS (2);
35004 return true;
35006 else
35008 *total = COSTS_N_INSNS (3);
35009 return false;
35012 /* CC COMPARE. */
35013 if (outer_code == COMPARE)
35015 *total = 0;
35016 return true;
35018 break;
35020 default:
35021 break;
35024 return false;
35027 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
35029 static bool
35030 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
35031 int opno, int *total, bool speed)
35033 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
35035 fprintf (stderr,
35036 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
35037 "opno = %d, total = %d, speed = %s, x:\n",
35038 ret ? "complete" : "scan inner",
35039 GET_MODE_NAME (mode),
35040 GET_RTX_NAME (outer_code),
35041 opno,
35042 *total,
35043 speed ? "true" : "false");
35045 debug_rtx (x);
35047 return ret;
35050 static int
35051 rs6000_insn_cost (rtx_insn *insn, bool speed)
35053 if (recog_memoized (insn) < 0)
35054 return 0;
35056 if (!speed)
35057 return get_attr_length (insn);
35059 int cost = get_attr_cost (insn);
35060 if (cost > 0)
35061 return cost;
35063 int n = get_attr_length (insn) / 4;
35064 enum attr_type type = get_attr_type (insn);
35066 switch (type)
35068 case TYPE_LOAD:
35069 case TYPE_FPLOAD:
35070 case TYPE_VECLOAD:
35071 cost = COSTS_N_INSNS (n + 1);
35072 break;
35074 case TYPE_MUL:
35075 switch (get_attr_size (insn))
35077 case SIZE_8:
35078 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
35079 break;
35080 case SIZE_16:
35081 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
35082 break;
35083 case SIZE_32:
35084 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
35085 break;
35086 case SIZE_64:
35087 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
35088 break;
35089 default:
35090 gcc_unreachable ();
35092 break;
35093 case TYPE_DIV:
35094 switch (get_attr_size (insn))
35096 case SIZE_32:
35097 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
35098 break;
35099 case SIZE_64:
35100 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
35101 break;
35102 default:
35103 gcc_unreachable ();
35105 break;
35107 case TYPE_FP:
35108 cost = n * rs6000_cost->fp;
35109 break;
35110 case TYPE_DMUL:
35111 cost = n * rs6000_cost->dmul;
35112 break;
35113 case TYPE_SDIV:
35114 cost = n * rs6000_cost->sdiv;
35115 break;
35116 case TYPE_DDIV:
35117 cost = n * rs6000_cost->ddiv;
35118 break;
35120 case TYPE_SYNC:
35121 case TYPE_LOAD_L:
35122 case TYPE_MFCR:
35123 case TYPE_MFCRF:
35124 cost = COSTS_N_INSNS (n + 2);
35125 break;
35127 default:
35128 cost = COSTS_N_INSNS (n);
35131 return cost;
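/* Illustrative sketch, not part of this file: the fallback above charges
   one instruction of cost per 4 bytes of encoding, with loads getting an
   extra instruction for L1 latency.  Assuming GCC's usual definition
   COSTS_N_INSNS (N) == (N) * 4, the heuristic reduces to this.  */

#define EXAMPLE_COSTS_N_INSNS(N) ((N) * 4)

static int
fallback_insn_cost (int length_in_bytes, int is_load)
{
  int n = length_in_bytes / 4;	/* PowerPC instructions are 4 bytes.  */
  return EXAMPLE_COSTS_N_INSNS (is_load ? n + 1 : n);
}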
35134 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
35136 static int
35137 rs6000_debug_address_cost (rtx x, machine_mode mode,
35138 addr_space_t as, bool speed)
35140 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
35142 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
35143 ret, speed ? "true" : "false");
35144 debug_rtx (x);
35146 return ret;
35150 /* A C expression returning the cost of moving data from a register of class
35151 CLASS1 to one of CLASS2. */
35153 static int
35154 rs6000_register_move_cost (machine_mode mode,
35155 reg_class_t from, reg_class_t to)
35157 int ret;
35159 if (TARGET_DEBUG_COST)
35160 dbg_cost_ctrl++;
35162 /* Moves from/to GENERAL_REGS. */
35163 if (reg_classes_intersect_p (to, GENERAL_REGS)
35164 || reg_classes_intersect_p (from, GENERAL_REGS))
35166 reg_class_t rclass = from;
35168 if (! reg_classes_intersect_p (to, GENERAL_REGS))
35169 rclass = to;
35171 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
35172 ret = (rs6000_memory_move_cost (mode, rclass, false)
35173 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
35175 /* It's more expensive to move CR_REGS than CR0_REGS because of the
35176 shift. */
35177 else if (rclass == CR_REGS)
35178 ret = 4;
35180 /* For those processors that have slow LR/CTR moves, make them more
35181      expensive than memory in order to bias spills to memory.  */
35182 else if ((rs6000_tune == PROCESSOR_POWER6
35183 || rs6000_tune == PROCESSOR_POWER7
35184 || rs6000_tune == PROCESSOR_POWER8
35185 || rs6000_tune == PROCESSOR_POWER9)
35186 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35187 ret = 6 * hard_regno_nregs (0, mode);
35189 else
35190 /* A move will cost one instruction per GPR moved. */
35191 ret = 2 * hard_regno_nregs (0, mode);
35194 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35195 else if (VECTOR_MEM_VSX_P (mode)
35196 && reg_classes_intersect_p (to, VSX_REGS)
35197 && reg_classes_intersect_p (from, VSX_REGS))
35198 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35200 /* Moving between two similar registers is just one instruction. */
35201 else if (reg_classes_intersect_p (to, from))
35202 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35204 /* Everything else has to go through GENERAL_REGS. */
35205 else
35206 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35207 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35209 if (TARGET_DEBUG_COST)
35211 if (dbg_cost_ctrl == 1)
35212 fprintf (stderr,
35213 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35214 ret, GET_MODE_NAME (mode), reg_class_names[from],
35215 reg_class_names[to]);
35216 dbg_cost_ctrl--;
35219 return ret;
35222 /* A C expression returning the cost of moving data of MODE from a register to
35223 or from memory. */
35225 static int
35226 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35227 bool in ATTRIBUTE_UNUSED)
35229 int ret;
35231 if (TARGET_DEBUG_COST)
35232 dbg_cost_ctrl++;
35234 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35235 ret = 4 * hard_regno_nregs (0, mode);
35236 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35237 || reg_classes_intersect_p (rclass, VSX_REGS)))
35238 ret = 4 * hard_regno_nregs (32, mode);
35239 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35240 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35241 else
35242 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35244 if (TARGET_DEBUG_COST)
35246 if (dbg_cost_ctrl == 1)
35247 fprintf (stderr,
35248 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35249 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35250 dbg_cost_ctrl--;
35253 return ret;
35256 /* Returns a code for a target-specific builtin that implements
35257 reciprocal of the function, or NULL_TREE if not available. */
35259 static tree
35260 rs6000_builtin_reciprocal (tree fndecl)
35262 switch (DECL_FUNCTION_CODE (fndecl))
35264 case VSX_BUILTIN_XVSQRTDP:
35265 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35266 return NULL_TREE;
35268 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35270 case VSX_BUILTIN_XVSQRTSP:
35271 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35272 return NULL_TREE;
35274 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35276 default:
35277 return NULL_TREE;
35281 /* Load up a constant. If the mode is a vector mode, splat the value across
35282 all of the vector elements. */
35284 static rtx
35285 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35287 rtx reg;
35289 if (mode == SFmode || mode == DFmode)
35291 rtx d = const_double_from_real_value (dconst, mode);
35292 reg = force_reg (mode, d);
35294 else if (mode == V4SFmode)
35296 rtx d = const_double_from_real_value (dconst, SFmode);
35297 rtvec v = gen_rtvec (4, d, d, d, d);
35298 reg = gen_reg_rtx (mode);
35299 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35301 else if (mode == V2DFmode)
35303 rtx d = const_double_from_real_value (dconst, DFmode);
35304 rtvec v = gen_rtvec (2, d, d);
35305 reg = gen_reg_rtx (mode);
35306 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35308 else
35309 gcc_unreachable ();
35311 return reg;
35314 /* Generate an FMA instruction. */
35316 static void
35317 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35319 machine_mode mode = GET_MODE (target);
35320 rtx dst;
35322 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35323 gcc_assert (dst != NULL);
35325 if (dst != target)
35326 emit_move_insn (target, dst);
35329 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35331 static void
35332 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35334 machine_mode mode = GET_MODE (dst);
35335 rtx r;
35337 /* This is a tad more complicated, since the fnma_optab is for
35338 a different expression: fma(-m1, m2, a), which is the same
35339 thing except in the case of signed zeros.
35341 Fortunately we know that if FMA is supported that FNMSUB is
35342 also supported in the ISA. Just expand it directly. */
35344 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35346 r = gen_rtx_NEG (mode, a);
35347 r = gen_rtx_FMA (mode, m1, m2, r);
35348 r = gen_rtx_NEG (mode, r);
35349 emit_insn (gen_rtx_SET (dst, r));
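/* Illustrative sketch, not part of this file: the identity the rewrite
   above relies on, checked with C99 fma().  fnmsub (m1, m2, a) is
   -fma (m1, m2, -a) = a - m1*m2, which agrees with fma (-m1, m2, a)
   everywhere except in the sign of zero results.  Build with -lm.  */

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double m1 = 3.0, m2 = 4.0, a = 5.0;
  double nmsub = -fma (m1, m2, -a);	/* a - m1*m2 = -7.0 */
  double fnma = fma (-m1, m2, a);	/* also -7.0 here */
  printf ("%g %g\n", nmsub, fnma);
  return 0;
}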
35352 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35353 add a reg_note saying that this was a division. Support both scalar and
35354 vector divide. Assumes no trapping math and finite arguments. */
35356 void
35357 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35359 machine_mode mode = GET_MODE (dst);
35360 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35361 int i;
35363 /* Low precision estimates guarantee 5 bits of accuracy. High
35364 precision estimates guarantee 14 bits of accuracy. SFmode
35365 requires 23 bits of accuracy. DFmode requires 52 bits of
35366 accuracy. Each pass at least doubles the accuracy, leading
35367 to the following. */
35368 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35369 if (mode == DFmode || mode == V2DFmode)
35370 passes++;
35372 enum insn_code code = optab_handler (smul_optab, mode);
35373 insn_gen_fn gen_mul = GEN_FCN (code);
35375 gcc_assert (code != CODE_FOR_nothing);
35377 one = rs6000_load_constant_and_splat (mode, dconst1);
35379 /* x0 = 1./d estimate */
35380 x0 = gen_reg_rtx (mode);
35381 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35382 UNSPEC_FRES)));
35384 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35385 if (passes > 1) {
35387 /* e0 = 1. - d * x0 */
35388 e0 = gen_reg_rtx (mode);
35389 rs6000_emit_nmsub (e0, d, x0, one);
35391 /* x1 = x0 + e0 * x0 */
35392 x1 = gen_reg_rtx (mode);
35393 rs6000_emit_madd (x1, e0, x0, x0);
35395 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35396 ++i, xprev = xnext, eprev = enext) {
35398 /* enext = eprev * eprev */
35399 enext = gen_reg_rtx (mode);
35400 emit_insn (gen_mul (enext, eprev, eprev));
35402 /* xnext = xprev + enext * xprev */
35403 xnext = gen_reg_rtx (mode);
35404 rs6000_emit_madd (xnext, enext, xprev, xprev);
35407 } else
35408 xprev = x0;
35410 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35412 /* u = n * xprev */
35413 u = gen_reg_rtx (mode);
35414 emit_insn (gen_mul (u, n, xprev));
35416 /* v = n - (d * u) */
35417 v = gen_reg_rtx (mode);
35418 rs6000_emit_nmsub (v, d, u, n);
35420 /* dst = (v * xprev) + u */
35421 rs6000_emit_madd (dst, v, xprev, u);
35423 if (note_p)
35424 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
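/* Illustrative sketch, not part of this file: the same refinement in
   scalar C.  Each pass computes the residual e = 1 - d*x and refines
   x += e*x; the emitted code instead squares the previous residual
   (e' = e*e), which is algebraically the same in exact arithmetic.
   The last pass folds in the numerator: u = n*x, v = n - d*u,
   result = u + v*x, exactly as above.  */

#include <stdio.h>

static double
nr_divide (double n, double d, double x0, int passes)
{
  double x = x0;			/* crude estimate of 1/d */

  for (int i = 0; i < passes - 1; i++)
    {
      double e = 1.0 - d * x;		/* reciprocal residual */
      x = x + e * x;			/* x *= (2 - d*x) */
    }

  double u = n * x;			/* quotient estimate */
  double v = n - d * u;			/* remaining error */
  return u + v * x;			/* corrected quotient */
}

int
main (void)
{
  /* A ~5-bit estimate of 1/3 refined over three passes.  */
  printf ("%.17g\n", nr_divide (1.0, 3.0, 0.34, 3));
  return 0;
}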
35427 /* Goldschmidt's Algorithm for single/double-precision floating point
35428 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35430 void
35431 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35433 machine_mode mode = GET_MODE (src);
35434 rtx e = gen_reg_rtx (mode);
35435 rtx g = gen_reg_rtx (mode);
35436 rtx h = gen_reg_rtx (mode);
35438 /* Low precision estimates guarantee 5 bits of accuracy. High
35439 precision estimates guarantee 14 bits of accuracy. SFmode
35440 requires 23 bits of accuracy. DFmode requires 52 bits of
35441 accuracy. Each pass at least doubles the accuracy, leading
35442 to the following. */
35443 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35444 if (mode == DFmode || mode == V2DFmode)
35445 passes++;
35447 int i;
35448 rtx mhalf;
35449 enum insn_code code = optab_handler (smul_optab, mode);
35450 insn_gen_fn gen_mul = GEN_FCN (code);
35452 gcc_assert (code != CODE_FOR_nothing);
35454 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35456 /* e = rsqrt estimate */
35457 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35458 UNSPEC_RSQRT)));
35460 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35461 if (!recip)
35463 rtx zero = force_reg (mode, CONST0_RTX (mode));
35465 if (mode == SFmode)
35467 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35468 e, zero, mode, 0);
35469 if (target != e)
35470 emit_move_insn (e, target);
35472 else
35474 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35475 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35479 /* g = sqrt estimate. */
35480 emit_insn (gen_mul (g, e, src));
35481 /* h = 1/(2*sqrt) estimate. */
35482 emit_insn (gen_mul (h, e, mhalf));
35484 if (recip)
35486 if (passes == 1)
35488 rtx t = gen_reg_rtx (mode);
35489 rs6000_emit_nmsub (t, g, h, mhalf);
35490 /* Apply correction directly to 1/rsqrt estimate. */
35491 rs6000_emit_madd (dst, e, t, e);
35493 else
35495 for (i = 0; i < passes; i++)
35497 rtx t1 = gen_reg_rtx (mode);
35498 rtx g1 = gen_reg_rtx (mode);
35499 rtx h1 = gen_reg_rtx (mode);
35501 rs6000_emit_nmsub (t1, g, h, mhalf);
35502 rs6000_emit_madd (g1, g, t1, g);
35503 rs6000_emit_madd (h1, h, t1, h);
35505 g = g1;
35506 h = h1;
35508 /* Multiply by 2 for 1/rsqrt. */
35509 emit_insn (gen_add3_insn (dst, h, h));
35512 else
35514 rtx t = gen_reg_rtx (mode);
35515 rs6000_emit_nmsub (t, g, h, mhalf);
35516 rs6000_emit_madd (dst, g, t, g);
35519 return;
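/* Illustrative sketch, not part of this file: Goldschmidt's iteration in
   scalar C.  g tracks sqrt(x) and h tracks 1/(2*sqrt(x)); each pass
   computes the residual t = 0.5 - g*h and scales both estimates by
   (1 + t), mirroring the nmsub/madd pairs above.  The sqrt result is g;
   the rsqrt result is 2*h.  */

static double
goldschmidt_sqrt (double x, double e0, int passes)
{
  double g = e0 * x;		/* ~ sqrt(x) */
  double h = e0 * 0.5;		/* ~ 1/(2*sqrt(x)) */

  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;
      g = g + t * g;
      h = h + t * h;
    }

  return g;			/* or 2*h for 1/sqrt(x) */
}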
35522 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35523 (Power7) targets. DST is the target, and SRC is the argument operand. */
35525 void
35526 rs6000_emit_popcount (rtx dst, rtx src)
35528 machine_mode mode = GET_MODE (dst);
35529 rtx tmp1, tmp2;
35531 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35532 if (TARGET_POPCNTD)
35534 if (mode == SImode)
35535 emit_insn (gen_popcntdsi2 (dst, src));
35536 else
35537 emit_insn (gen_popcntddi2 (dst, src));
35538 return;
35541 tmp1 = gen_reg_rtx (mode);
35543 if (mode == SImode)
35545 emit_insn (gen_popcntbsi2 (tmp1, src));
35546 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35547 NULL_RTX, 0);
35548 tmp2 = force_reg (SImode, tmp2);
35549 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35551 else
35553 emit_insn (gen_popcntbdi2 (tmp1, src));
35554 tmp2 = expand_mult (DImode, tmp1,
35555 GEN_INT ((HOST_WIDE_INT)
35556 0x01010101 << 32 | 0x01010101),
35557 NULL_RTX, 0);
35558 tmp2 = force_reg (DImode, tmp2);
35559 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
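/* Illustrative sketch, not part of this file: the popcntb-plus-multiply
   trick in portable C.  per_byte_popcount stands in for the popcntb
   instruction; multiplying by 0x01010101 accumulates all four byte
   counts into the top byte, which the final shift by 24 extracts.  */

#include <stdint.h>

static uint32_t
per_byte_popcount (uint32_t x)	/* what popcntb computes */
{
  x = x - ((x >> 1) & 0x55555555u);
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
  return (x + (x >> 4)) & 0x0f0f0f0fu;
}

static uint32_t
popcount32 (uint32_t x)
{
  return (per_byte_popcount (x) * 0x01010101u) >> 24;
}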
35564 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35565 target, and SRC is the argument operand. */
35567 void
35568 rs6000_emit_parity (rtx dst, rtx src)
35570 machine_mode mode = GET_MODE (dst);
35571 rtx tmp;
35573 tmp = gen_reg_rtx (mode);
35575 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35576 if (TARGET_CMPB)
35578 if (mode == SImode)
35580 emit_insn (gen_popcntbsi2 (tmp, src));
35581 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35583 else
35585 emit_insn (gen_popcntbdi2 (tmp, src));
35586 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35588 return;
35591 if (mode == SImode)
35593 /* Is mult+shift >= shift+xor+shift+xor? */
35594 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35596 rtx tmp1, tmp2, tmp3, tmp4;
35598 tmp1 = gen_reg_rtx (SImode);
35599 emit_insn (gen_popcntbsi2 (tmp1, src));
35601 tmp2 = gen_reg_rtx (SImode);
35602 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35603 tmp3 = gen_reg_rtx (SImode);
35604 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35606 tmp4 = gen_reg_rtx (SImode);
35607 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35608 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35610 else
35611 rs6000_emit_popcount (tmp, src);
35612 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35614 else
35616 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35617 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35619 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35621 tmp1 = gen_reg_rtx (DImode);
35622 emit_insn (gen_popcntbdi2 (tmp1, src));
35624 tmp2 = gen_reg_rtx (DImode);
35625 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35626 tmp3 = gen_reg_rtx (DImode);
35627 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35629 tmp4 = gen_reg_rtx (DImode);
35630 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35631 tmp5 = gen_reg_rtx (DImode);
35632 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35634 tmp6 = gen_reg_rtx (DImode);
35635 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35636 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35638 else
35639 rs6000_emit_popcount (tmp, src);
35640 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
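/* Illustrative sketch, not part of this file: the shift/xor ladder in
   portable C.  Folding a word in halves with xor preserves parity, and
   the final AND with 1 extracts it; the code above starts from the
   popcntb byte counts, so it only needs the byte-granular folds.  */

#include <stdint.h>

static uint32_t
parity32 (uint32_t x)
{
  x ^= x >> 16;
  x ^= x >> 8;
  x ^= x >> 4;
  x ^= x >> 2;
  x ^= x >> 1;
  return x & 1;
}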
35644 /* Expand an Altivec constant permutation for little endian mode.
35645 OP0 and OP1 are the input vectors and TARGET is the output vector.
35646 SEL specifies the constant permutation vector.
35648 There are two issues: First, the two input operands must be
35649 swapped so that together they form a double-wide array in LE
35650 order. Second, the vperm instruction has surprising behavior
35651 in LE mode: it interprets the elements of the source vectors
35652 in BE mode ("left to right") and interprets the elements of
35653 the destination vector in LE mode ("right to left"). To
35654 correct for this, we must subtract each element of the permute
35655 control vector from 31.
35657 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35658 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35659 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35660 serve as the permute control vector. Then, in BE mode,
35662 vperm 9,10,11,12
35664 places the desired result in vr9. However, in LE mode the
35665 vector contents will be
35667 vr10 = 00000003 00000002 00000001 00000000
35668 vr11 = 00000007 00000006 00000005 00000004
35670 The result of the vperm using the same permute control vector is
35672 vr9 = 05000000 07000000 01000000 03000000
35674 That is, the leftmost 4 bytes of vr10 are interpreted as the
35675 source for the rightmost 4 bytes of vr9, and so on.
35677 If we change the permute control vector to
35679      vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35681 and issue
35683 vperm 9,11,10,12
35685 we get the desired
35687 vr9 = 00000006 00000004 00000002 00000000. */
35689 static void
35690 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35691 const vec_perm_indices &sel)
35693 unsigned int i;
35694 rtx perm[16];
35695 rtx constv, unspec;
35697 /* Unpack and adjust the constant selector. */
35698 for (i = 0; i < 16; ++i)
35700 unsigned int elt = 31 - (sel[i] & 31);
35701 perm[i] = GEN_INT (elt);
35704 /* Expand to a permute, swapping the inputs and using the
35705 adjusted selector. */
35706 if (!REG_P (op0))
35707 op0 = force_reg (V16QImode, op0);
35708 if (!REG_P (op1))
35709 op1 = force_reg (V16QImode, op1);
35711 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35712 constv = force_reg (V16QImode, constv);
35713 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35714 UNSPEC_VPERM);
35715 if (!REG_P (target))
35717 rtx tmp = gen_reg_rtx (V16QImode);
35718 emit_move_insn (tmp, unspec);
35719 unspec = tmp;
35722 emit_move_insn (target, unspec);
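/* Illustrative sketch, not part of this file: the selector adjustment in
   isolation.  Given a BE-style control vector, the emitted vperm uses
   31 - sel[i] with the two inputs swapped, exactly as the loop above
   builds `perm'.  */

#include <stdio.h>

int
main (void)
{
  /* The even-word extraction from the worked example above.  */
  unsigned char sel[16] = { 0, 1, 2, 3, 8, 9, 10, 11,
			    16, 17, 18, 19, 24, 25, 26, 27 };

  for (int i = 0; i < 16; i++)
    printf ("%u%c", 31 - (sel[i] & 31), i == 15 ? '\n' : ',');
  /* Prints 31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4 -- the adjusted
     control vector shown above.  */
  return 0;
}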
35725 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35726 permute control vector. But here it's not a constant, so we must
35727 generate a vector NAND or NOR to do the adjustment. */
35729 void
35730 altivec_expand_vec_perm_le (rtx operands[4])
35732 rtx notx, iorx, unspec;
35733 rtx target = operands[0];
35734 rtx op0 = operands[1];
35735 rtx op1 = operands[2];
35736 rtx sel = operands[3];
35737 rtx tmp = target;
35738 rtx norreg = gen_reg_rtx (V16QImode);
35739 machine_mode mode = GET_MODE (target);
35741 /* Get everything in regs so the pattern matches. */
35742 if (!REG_P (op0))
35743 op0 = force_reg (mode, op0);
35744 if (!REG_P (op1))
35745 op1 = force_reg (mode, op1);
35746 if (!REG_P (sel))
35747 sel = force_reg (V16QImode, sel);
35748 if (!REG_P (target))
35749 tmp = gen_reg_rtx (mode);
35751 if (TARGET_P9_VECTOR)
35753 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35754 UNSPEC_VPERMR);
35756 else
35758 /* Invert the selector with a VNAND if available, else a VNOR.
35759 The VNAND is preferred for future fusion opportunities. */
35760 notx = gen_rtx_NOT (V16QImode, sel);
35761 iorx = (TARGET_P8_VECTOR
35762 ? gen_rtx_IOR (V16QImode, notx, notx)
35763 : gen_rtx_AND (V16QImode, notx, notx));
35764 emit_insn (gen_rtx_SET (norreg, iorx));
35766 /* Permute with operands reversed and adjusted selector. */
35767 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35768 UNSPEC_VPERM);
35771 /* Copy into target, possibly by way of a register. */
35772 if (!REG_P (target))
35774 emit_move_insn (tmp, unspec);
35775 unspec = tmp;
35778 emit_move_insn (target, unspec);
35781 /* Expand an Altivec constant permutation. Return true if we match
35782 an efficient implementation; false to fall back to VPERM.
35784 OP0 and OP1 are the input vectors and TARGET is the output vector.
35785 SEL specifies the constant permutation vector. */
35787 static bool
35788 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35789 const vec_perm_indices &sel)
35791 struct altivec_perm_insn {
35792 HOST_WIDE_INT mask;
35793 enum insn_code impl;
35794 unsigned char perm[16];
35796 static const struct altivec_perm_insn patterns[] = {
35797 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35798 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35799 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35800 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35801 { OPTION_MASK_ALTIVEC,
35802 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35803 : CODE_FOR_altivec_vmrglb_direct),
35804 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35805 { OPTION_MASK_ALTIVEC,
35806 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35807 : CODE_FOR_altivec_vmrglh_direct),
35808 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35809 { OPTION_MASK_ALTIVEC,
35810 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35811 : CODE_FOR_altivec_vmrglw_direct),
35812 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35813 { OPTION_MASK_ALTIVEC,
35814 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35815 : CODE_FOR_altivec_vmrghb_direct),
35816 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35817 { OPTION_MASK_ALTIVEC,
35818 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35819 : CODE_FOR_altivec_vmrghh_direct),
35820 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35821 { OPTION_MASK_ALTIVEC,
35822 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35823 : CODE_FOR_altivec_vmrghw_direct),
35824 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35825 { OPTION_MASK_P8_VECTOR,
35826 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35827 : CODE_FOR_p8_vmrgow_v4sf_direct),
35828 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35829 { OPTION_MASK_P8_VECTOR,
35830 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35831 : CODE_FOR_p8_vmrgew_v4sf_direct),
35832 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35835 unsigned int i, j, elt, which;
35836 unsigned char perm[16];
35837 rtx x;
35838 bool one_vec;
35840 /* Unpack the constant selector. */
35841 for (i = which = 0; i < 16; ++i)
35843 elt = sel[i] & 31;
35844 which |= (elt < 16 ? 1 : 2);
35845 perm[i] = elt;
35848 /* Simplify the constant selector based on operands. */
35849 switch (which)
35851 default:
35852 gcc_unreachable ();
35854 case 3:
35855 one_vec = false;
35856 if (!rtx_equal_p (op0, op1))
35857 break;
35858 /* FALLTHRU */
35860 case 2:
35861 for (i = 0; i < 16; ++i)
35862 perm[i] &= 15;
35863 op0 = op1;
35864 one_vec = true;
35865 break;
35867 case 1:
35868 op1 = op0;
35869 one_vec = true;
35870 break;
35873 /* Look for splat patterns. */
35874 if (one_vec)
35876 elt = perm[0];
35878 for (i = 0; i < 16; ++i)
35879 if (perm[i] != elt)
35880 break;
35881 if (i == 16)
35883 if (!BYTES_BIG_ENDIAN)
35884 elt = 15 - elt;
35885 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35886 return true;
35889 if (elt % 2 == 0)
35891 for (i = 0; i < 16; i += 2)
35892 if (perm[i] != elt || perm[i + 1] != elt + 1)
35893 break;
35894 if (i == 16)
35896 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35897 x = gen_reg_rtx (V8HImode);
35898 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35899 GEN_INT (field)));
35900 emit_move_insn (target, gen_lowpart (V16QImode, x));
35901 return true;
35905 if (elt % 4 == 0)
35907 for (i = 0; i < 16; i += 4)
35908 if (perm[i] != elt
35909 || perm[i + 1] != elt + 1
35910 || perm[i + 2] != elt + 2
35911 || perm[i + 3] != elt + 3)
35912 break;
35913 if (i == 16)
35915 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35916 x = gen_reg_rtx (V4SImode);
35917 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35918 GEN_INT (field)));
35919 emit_move_insn (target, gen_lowpart (V16QImode, x));
35920 return true;
35925 /* Look for merge and pack patterns. */
35926 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35928 bool swapped;
35930 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35931 continue;
35933 elt = patterns[j].perm[0];
35934 if (perm[0] == elt)
35935 swapped = false;
35936 else if (perm[0] == elt + 16)
35937 swapped = true;
35938 else
35939 continue;
35940 for (i = 1; i < 16; ++i)
35942 elt = patterns[j].perm[i];
35943 if (swapped)
35944 elt = (elt >= 16 ? elt - 16 : elt + 16);
35945 else if (one_vec && elt >= 16)
35946 elt -= 16;
35947 if (perm[i] != elt)
35948 break;
35950 if (i == 16)
35952 enum insn_code icode = patterns[j].impl;
35953 machine_mode omode = insn_data[icode].operand[0].mode;
35954 machine_mode imode = insn_data[icode].operand[1].mode;
35956 /* For little-endian, don't use vpkuwum and vpkuhum if the
35957 underlying vector type is not V4SI and V8HI, respectively.
35958 For example, using vpkuwum with a V8HI picks up the even
35959 halfwords (BE numbering) when the even halfwords (LE
35960 numbering) are what we need. */
35961 if (!BYTES_BIG_ENDIAN
35962 && icode == CODE_FOR_altivec_vpkuwum_direct
35963 && ((GET_CODE (op0) == REG
35964 && GET_MODE (op0) != V4SImode)
35965 || (GET_CODE (op0) == SUBREG
35966 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35967 continue;
35968 if (!BYTES_BIG_ENDIAN
35969 && icode == CODE_FOR_altivec_vpkuhum_direct
35970 && ((GET_CODE (op0) == REG
35971 && GET_MODE (op0) != V8HImode)
35972 || (GET_CODE (op0) == SUBREG
35973 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35974 continue;
35976 /* For little-endian, the two input operands must be swapped
35977 (or swapped back) to ensure proper right-to-left numbering
35978 from 0 to 2N-1. */
35979 if (swapped ^ !BYTES_BIG_ENDIAN)
35980 std::swap (op0, op1);
35981 if (imode != V16QImode)
35983 op0 = gen_lowpart (imode, op0);
35984 op1 = gen_lowpart (imode, op1);
35986 if (omode == V16QImode)
35987 x = target;
35988 else
35989 x = gen_reg_rtx (omode);
35990 emit_insn (GEN_FCN (icode) (x, op0, op1));
35991 if (omode != V16QImode)
35992 emit_move_insn (target, gen_lowpart (V16QImode, x));
35993 return true;
35997 if (!BYTES_BIG_ENDIAN)
35999 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
36000 return true;
36003 return false;
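/* Illustrative sketch, not part of this file: the word-splat check above
   in plain C.  A selector splats a word when every group of four bytes
   repeats the same word-aligned element; the BE lane is elt/4 and the
   LE lane 3 - elt/4, as in the code.  */

#include <stdbool.h>

static bool
is_word_splat (const unsigned char perm[16], unsigned *field_be)
{
  unsigned elt = perm[0];

  if (elt % 4 != 0)
    return false;
  for (int i = 0; i < 16; i += 4)
    if (perm[i] != elt || perm[i + 1] != elt + 1
	|| perm[i + 2] != elt + 2 || perm[i + 3] != elt + 3)
      return false;

  *field_be = elt / 4;
  return true;
}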
36006 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
36007 Return true if we match an efficient implementation. */
36009 static bool
36010 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
36011 unsigned char perm0, unsigned char perm1)
36013 rtx x;
36015 /* If both selectors come from the same operand, fold to single op. */
36016 if ((perm0 & 2) == (perm1 & 2))
36018 if (perm0 & 2)
36019 op0 = op1;
36020 else
36021 op1 = op0;
36023 /* If both operands are equal, fold to simpler permutation. */
36024 if (rtx_equal_p (op0, op1))
36026 perm0 = perm0 & 1;
36027 perm1 = (perm1 & 1) + 2;
36029 /* If the first selector comes from the second operand, swap. */
36030 else if (perm0 & 2)
36032 if (perm1 & 2)
36033 return false;
36034 perm0 -= 2;
36035 perm1 += 2;
36036 std::swap (op0, op1);
36038 /* If the second selector does not come from the second operand, fail. */
36039 else if ((perm1 & 2) == 0)
36040 return false;
36042 /* Success! */
36043 if (target != NULL)
36045 machine_mode vmode, dmode;
36046 rtvec v;
36048 vmode = GET_MODE (target);
36049 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
36050 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
36051 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
36052 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
36053 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
36054 emit_insn (gen_rtx_SET (target, x));
36056 return true;
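/* Illustrative sketch, not part of this file: the selector encoding the
   folding above manipulates.  For a two-element vector each selector is
   0..3; bit 1 picks the operand (0 = op0, 1 = op1) and bit 0 picks the
   element within it, so the canonical form has perm0 in {0,1} and perm1
   in {2,3}.  */

#include <stdio.h>

int
main (void)
{
  for (unsigned p = 0; p < 4; p++)
    printf ("selector %u -> op%u element %u\n", p, (p >> 1) & 1, p & 1);
  return 0;
}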
36059 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
36061 static bool
36062 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
36063 rtx op1, const vec_perm_indices &sel)
36065 bool testing_p = !target;
36067 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
36068 if (TARGET_ALTIVEC && testing_p)
36069 return true;
36071 /* Check for ps_merge* or xxpermdi insns. */
36072 if ((vmode == V2SFmode && TARGET_PAIRED_FLOAT)
36073 || ((vmode == V2DFmode || vmode == V2DImode)
36074 && VECTOR_MEM_VSX_P (vmode)))
36076 if (testing_p)
36078 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
36079 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
36081 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
36082 return true;
36085 if (TARGET_ALTIVEC)
36087 /* Force the target-independent code to lower to V16QImode. */
36088 if (vmode != V16QImode)
36089 return false;
36090 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
36091 return true;
36094 return false;
36097 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
36098 OP0 and OP1 are the input vectors and TARGET is the output vector.
36099 PERM specifies the constant permutation vector. */
36101 static void
36102 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
36103 machine_mode vmode, const vec_perm_builder &perm)
36105 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
36106 if (x != target)
36107 emit_move_insn (target, x);
36110 /* Expand an extract even operation. */
36112 void
36113 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
36115 machine_mode vmode = GET_MODE (target);
36116 unsigned i, nelt = GET_MODE_NUNITS (vmode);
36117 vec_perm_builder perm (nelt, nelt, 1);
36119 for (i = 0; i < nelt; i++)
36120 perm.quick_push (i * 2);
36122 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
36125 /* Expand a vector interleave operation. */
36127 void
36128 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
36130 machine_mode vmode = GET_MODE (target);
36131 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
36132 vec_perm_builder perm (nelt, nelt, 1);
36134 high = (highp ? 0 : nelt / 2);
36135 for (i = 0; i < nelt / 2; i++)
36137 perm.quick_push (i + high);
36138 perm.quick_push (i + nelt + high);
36141 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
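/* Illustrative sketch, not part of this file: the index vectors the two
   helpers above push.  extract_even picks elements 0,2,4,... from the
   concatenation of op0 and op1; interleave alternates elements from the
   high or low halves of the two inputs.  */

#include <stdio.h>

static void
print_indices (unsigned nelt, int highp)
{
  unsigned high = highp ? 0 : nelt / 2;

  printf ("extract_even:");
  for (unsigned i = 0; i < nelt; i++)
    printf (" %u", i * 2);

  printf ("\ninterleave:  ");
  for (unsigned i = 0; i < nelt / 2; i++)
    printf (" %u %u", i + high, i + nelt + high);
  printf ("\n");
}

int
main (void)
{
  print_indices (4, 1);		/* e.g. a V4SI vector, high half */
  return 0;
}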
36144 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
36145 void
36146 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
36148 HOST_WIDE_INT hwi_scale (scale);
36149 REAL_VALUE_TYPE r_pow;
36150 rtvec v = rtvec_alloc (2);
36151 rtx elt;
36152 rtx scale_vec = gen_reg_rtx (V2DFmode);
36153 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
36154 elt = const_double_from_real_value (r_pow, DFmode);
36155 RTVEC_ELT (v, 0) = elt;
36156 RTVEC_ELT (v, 1) = elt;
36157 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
36158 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
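/* Illustrative sketch, not part of this file: the scalar equivalent of
   the splat-and-multiply above.  Multiplying by the exact power of two
   2^scale only adjusts the exponent, which C99 ldexp expresses
   directly.  Build with -lm.  */

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double v[2] = { 1.5, -3.25 };

  for (int i = 0; i < 2; i++)
    v[i] = ldexp (v[i], 4);	/* v[i] *= 2^4, as gen_mulv2df3 does */
  printf ("%g %g\n", v[0], v[1]);
  return 0;
}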
36161 /* Return an RTX representing where to find the function value of a
36162 function returning MODE. */
36163 static rtx
36164 rs6000_complex_function_value (machine_mode mode)
36166 unsigned int regno;
36167 rtx r1, r2;
36168 machine_mode inner = GET_MODE_INNER (mode);
36169 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
36171 if (TARGET_FLOAT128_TYPE
36172 && (mode == KCmode
36173 || (mode == TCmode && TARGET_IEEEQUAD)))
36174 regno = ALTIVEC_ARG_RETURN;
36176 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36177 regno = FP_ARG_RETURN;
36179 else
36181 regno = GP_ARG_RETURN;
36183 /* 32-bit is OK since it'll go in r3/r4. */
36184 if (TARGET_32BIT && inner_bytes >= 4)
36185 return gen_rtx_REG (mode, regno);
36188 if (inner_bytes >= 8)
36189 return gen_rtx_REG (mode, regno);
36191 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36192 const0_rtx);
36193 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36194 GEN_INT (inner_bytes));
36195 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36198 /* Return an rtx describing a return value of MODE as a PARALLEL
36199 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36200 stride REG_STRIDE. */
36202 static rtx
36203 rs6000_parallel_return (machine_mode mode,
36204 int n_elts, machine_mode elt_mode,
36205 unsigned int regno, unsigned int reg_stride)
36207 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36209 int i;
36210 for (i = 0; i < n_elts; i++)
36212 rtx r = gen_rtx_REG (elt_mode, regno);
36213 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36214 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36215 regno += reg_stride;
36218 return par;
36221 /* Target hook for TARGET_FUNCTION_VALUE.
36223 An integer value is in r3 and a floating-point value is in fp1,
36224 unless -msoft-float. */
36226 static rtx
36227 rs6000_function_value (const_tree valtype,
36228 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36229 bool outgoing ATTRIBUTE_UNUSED)
36231 machine_mode mode;
36232 unsigned int regno;
36233 machine_mode elt_mode;
36234 int n_elts;
36236 /* Special handling for structs in darwin64. */
36237 if (TARGET_MACHO
36238 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36240 CUMULATIVE_ARGS valcum;
36241 rtx valret;
36243 valcum.words = 0;
36244 valcum.fregno = FP_ARG_MIN_REG;
36245 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36246 /* Do a trial code generation as if this were going to be passed as
36247 an argument; if any part goes in memory, we return NULL. */
36248 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36249 if (valret)
36250 return valret;
36251 /* Otherwise fall through to standard ABI rules. */
36254 mode = TYPE_MODE (valtype);
36256 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36257 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36259 int first_reg, n_regs;
36261 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36263 /* _Decimal128 must use even/odd register pairs. */
36264 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36265 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36267 else
36269 first_reg = ALTIVEC_ARG_RETURN;
36270 n_regs = 1;
36273 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36276   /* Some return value types need to be split under the 32-bit -mpowerpc64 ABI.  */
36277 if (TARGET_32BIT && TARGET_POWERPC64)
36278 switch (mode)
36280 default:
36281 break;
36282 case E_DImode:
36283 case E_SCmode:
36284 case E_DCmode:
36285 case E_TCmode:
36286 int count = GET_MODE_SIZE (mode) / 4;
36287 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36290 if ((INTEGRAL_TYPE_P (valtype)
36291 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36292 || POINTER_TYPE_P (valtype))
36293 mode = TARGET_32BIT ? SImode : DImode;
36295 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36296 /* _Decimal128 must use an even/odd register pair. */
36297 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36298 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36299 && !FLOAT128_VECTOR_P (mode)
36300 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
36301 regno = FP_ARG_RETURN;
36302 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36303 && targetm.calls.split_complex_arg)
36304 return rs6000_complex_function_value (mode);
36305 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36306 return register is used in both cases, and we won't see V2DImode/V2DFmode
36307 for pure altivec, combine the two cases. */
36308 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36309 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36310 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36311 regno = ALTIVEC_ARG_RETURN;
36312 else
36313 regno = GP_ARG_RETURN;
36315 return gen_rtx_REG (mode, regno);
36318 /* Define how to find the value returned by a library function
36319 assuming the value has mode MODE. */
36320 rtx
36321 rs6000_libcall_value (machine_mode mode)
36323 unsigned int regno;
36325   /* A long long return value needs to be split under the 32-bit -mpowerpc64 ABI.  */
36326 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36327 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36329 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36330 /* _Decimal128 must use an even/odd register pair. */
36331 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36332 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
36333 && TARGET_HARD_FLOAT
36334 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
36335 regno = FP_ARG_RETURN;
36336 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36337 return register is used in both cases, and we won't see V2DImode/V2DFmode
36338 for pure altivec, combine the two cases. */
36339 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36340 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36341 regno = ALTIVEC_ARG_RETURN;
36342 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36343 return rs6000_complex_function_value (mode);
36344 else
36345 regno = GP_ARG_RETURN;
36347 return gen_rtx_REG (mode, regno);
36350 /* Compute register pressure classes. We implement the target hook to avoid
36351 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36352    lead to incorrect estimates of the number of available registers and therefore
36353 increased register pressure/spill. */
36354 static int
36355 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36357 int n;
36359 n = 0;
36360 pressure_classes[n++] = GENERAL_REGS;
36361 if (TARGET_VSX)
36362 pressure_classes[n++] = VSX_REGS;
36363 else
36365 if (TARGET_ALTIVEC)
36366 pressure_classes[n++] = ALTIVEC_REGS;
36367 if (TARGET_HARD_FLOAT)
36368 pressure_classes[n++] = FLOAT_REGS;
36370 pressure_classes[n++] = CR_REGS;
36371 pressure_classes[n++] = SPECIAL_REGS;
36373 return n;
36376 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36377 Frame pointer elimination is automatically handled.
36379 For the RS/6000, if frame pointer elimination is being done, we would like
36380 to convert ap into fp, not sp.
36382 We need r30 if -mminimal-toc was specified, and there are constant pool
36383 references. */
36385 static bool
36386 rs6000_can_eliminate (const int from, const int to)
36388 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36389 ? ! frame_pointer_needed
36390 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36391 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36392 || constant_pool_empty_p ()
36393 : true);
36396 /* Define the offset between two registers, FROM to be eliminated and its
36397 replacement TO, at the start of a routine. */
36398 HOST_WIDE_INT
36399 rs6000_initial_elimination_offset (int from, int to)
36401 rs6000_stack_t *info = rs6000_stack_info ();
36402 HOST_WIDE_INT offset;
36404 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36405 offset = info->push_p ? 0 : -info->total_size;
36406 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36408 offset = info->push_p ? 0 : -info->total_size;
36409 if (FRAME_GROWS_DOWNWARD)
36410 offset += info->fixed_size + info->vars_size + info->parm_size;
36412 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36413 offset = FRAME_GROWS_DOWNWARD
36414 ? info->fixed_size + info->vars_size + info->parm_size
36415 : 0;
36416 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36417 offset = info->total_size;
36418 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36419 offset = info->push_p ? info->total_size : 0;
36420 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36421 offset = 0;
36422 else
36423 gcc_unreachable ();
36425 return offset;
36428 /* Fill in sizes of registers used by unwinder. */
36430 static void
36431 rs6000_init_dwarf_reg_sizes_extra (tree address)
36433 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36435 int i;
36436 machine_mode mode = TYPE_MODE (char_type_node);
36437 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36438 rtx mem = gen_rtx_MEM (BLKmode, addr);
36439 rtx value = gen_int_mode (16, mode);
36441 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36442 The unwinder still needs to know the size of Altivec registers. */
36444 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36446 int column = DWARF_REG_TO_UNWIND_COLUMN
36447 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36448 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36450 emit_move_insn (adjust_address (mem, mode, offset), value);
36455 /* Map internal gcc register numbers to debug format register numbers.
36456 FORMAT specifies the type of debug register number to use:
36457 0 -- debug information, except for frame-related sections
36458 1 -- DWARF .debug_frame section
36459 2 -- DWARF .eh_frame section */
36461 unsigned int
36462 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36464 /* Except for the above, we use the internal number for non-DWARF
36465 debug information, and also for .eh_frame. */
36466 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36467 return regno;
36469 /* On some platforms, we use the standard DWARF register
36470 numbering for .debug_info and .debug_frame. */
36471 #ifdef RS6000_USE_DWARF_NUMBERING
36472 if (regno <= 63)
36473 return regno;
36474 if (regno == LR_REGNO)
36475 return 108;
36476 if (regno == CTR_REGNO)
36477 return 109;
36478 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36479 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36480 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36481 to the DWARF reg for CR. */
36482 if (format == 1 && regno == CR2_REGNO)
36483 return 64;
36484 if (CR_REGNO_P (regno))
36485 return regno - CR0_REGNO + 86;
36486 if (regno == CA_REGNO)
36487 return 101; /* XER */
36488 if (ALTIVEC_REGNO_P (regno))
36489 return regno - FIRST_ALTIVEC_REGNO + 1124;
36490 if (regno == VRSAVE_REGNO)
36491 return 356;
36492 if (regno == VSCR_REGNO)
36493 return 67;
36494 #endif
36495 return regno;
36498 /* target hook eh_return_filter_mode */
36499 static scalar_int_mode
36500 rs6000_eh_return_filter_mode (void)
36502 return TARGET_32BIT ? SImode : word_mode;
36505 /* Target hook for scalar_mode_supported_p. */
36506 static bool
36507 rs6000_scalar_mode_supported_p (scalar_mode mode)
36509 /* -m32 does not support TImode. This is the default, from
36510 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36511 same ABI as for -m32. But default_scalar_mode_supported_p allows
36512 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36513 for -mpowerpc64. */
36514 if (TARGET_32BIT && mode == TImode)
36515 return false;
36517 if (DECIMAL_FLOAT_MODE_P (mode))
36518 return default_decimal_float_supported_p ();
36519 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36520 return true;
36521 else
36522 return default_scalar_mode_supported_p (mode);
36525 /* Target hook for vector_mode_supported_p. */
36526 static bool
36527 rs6000_vector_mode_supported_p (machine_mode mode)
36530 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36531 return true;
36533 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36534 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36535 double-double. */
36536 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36537 return true;
36539 else
36540 return false;
36543 /* Target hook for floatn_mode. */
36544 static opt_scalar_float_mode
36545 rs6000_floatn_mode (int n, bool extended)
36547 if (extended)
36549 switch (n)
36551 case 32:
36552 return DFmode;
36554 case 64:
36555 if (TARGET_FLOAT128_TYPE)
36556 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36557 else
36558 return opt_scalar_float_mode ();
36560 case 128:
36561 return opt_scalar_float_mode ();
36563 default:
36564 /* Those are the only valid _FloatNx types. */
36565 gcc_unreachable ();
36568 else
36570 switch (n)
36572 case 32:
36573 return SFmode;
36575 case 64:
36576 return DFmode;
36578 case 128:
36579 if (TARGET_FLOAT128_TYPE)
36580 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36581 else
36582 return opt_scalar_float_mode ();
36584 default:
36585 return opt_scalar_float_mode ();
36591 /* Target hook for c_mode_for_suffix. */
36592 static machine_mode
36593 rs6000_c_mode_for_suffix (char suffix)
36595 if (TARGET_FLOAT128_TYPE)
36597 if (suffix == 'q' || suffix == 'Q')
36598 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36600 /* At the moment, we are not defining a suffix for IBM extended double.
36601 If/when the default for -mabi=ieeelongdouble is changed, and we want
36602 to support __ibm128 constants in legacy library code, we may need to
36603 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36604 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36605 __float80 constants. */
36608 return VOIDmode;
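/* As an illustration, with -mfloat128 the constants below get KFmode (or
   TFmode when long double is IEEE 128-bit):

	__float128 x = 1.0q;
	__float128 y = 3.14159Q;

   A constant without a machine-dependent suffix stays a plain double.  */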
36611 /* Target hook for invalid_arg_for_unprototyped_fn. */
36612 static const char *
36613 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36615 return (!rs6000_darwin64_abi
36616 && typelist == 0
36617 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36618 && (funcdecl == NULL_TREE
36619 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36620 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36621 ? N_("AltiVec argument passed to unprototyped function")
36622 : NULL;
36625 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36626 setup by using __stack_chk_fail_local hidden function instead of
36627 calling __stack_chk_fail directly. Otherwise it is better to call
36628 __stack_chk_fail directly. */
36630 static tree ATTRIBUTE_UNUSED
36631 rs6000_stack_protect_fail (void)
36633 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36634 ? default_hidden_stack_protect_fail ()
36635 : default_external_stack_protect_fail ();
36638 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36640 #if TARGET_ELF
36641 static unsigned HOST_WIDE_INT
36642 rs6000_asan_shadow_offset (void)
36644 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36646 #endif
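/* A sketch of how this constant is used: AddressSanitizer instrumentation
   forms a shadow address as

	shadow = (addr >> ASAN_SHADOW_SHIFT) + rs6000_asan_shadow_offset ()

   so the shadow region starts at 1 << 41 for 64-bit code and at 1 << 29
   for 32-bit code.  */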
36648 /* Mask options that we want to support inside of attribute((target)) and
36649 #pragma GCC target operations. Note, we do not include things like
36650 64/32-bit, endianness, hard/soft floating point, etc. that would have
36651 different calling sequences. */
36653 struct rs6000_opt_mask {
36654 const char *name; /* option name */
36655 HOST_WIDE_INT mask; /* mask to set */
36656 bool invert; /* invert sense of mask */
36657 bool valid_target; /* option is a target option */
36660 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36662 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36663 { "cmpb", OPTION_MASK_CMPB, false, true },
36664 { "crypto", OPTION_MASK_CRYPTO, false, true },
36665 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36666 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36667 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36668 false, true },
36669 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36670 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36671 { "fprnd", OPTION_MASK_FPRND, false, true },
36672 { "hard-dfp", OPTION_MASK_DFP, false, true },
36673 { "htm", OPTION_MASK_HTM, false, true },
36674 { "isel", OPTION_MASK_ISEL, false, true },
36675 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36676 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36677 { "modulo", OPTION_MASK_MODULO, false, true },
36678 { "mulhw", OPTION_MASK_MULHW, false, true },
36679 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36680 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36681 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36682 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36683 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36684 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36685 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36686 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36687 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36688 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36689 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36690 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36691 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36692 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36693 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36694 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36695 { "string", 0, false, true },
36696 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36697 { "update", OPTION_MASK_NO_UPDATE, true , true },
36698 { "vsx", OPTION_MASK_VSX, false, true },
36699 #ifdef OPTION_MASK_64BIT
36700 #if TARGET_AIX_OS
36701 { "aix64", OPTION_MASK_64BIT, false, false },
36702 { "aix32", OPTION_MASK_64BIT, true, false },
36703 #else
36704 { "64", OPTION_MASK_64BIT, false, false },
36705 { "32", OPTION_MASK_64BIT, true, false },
36706 #endif
36707 #endif
36708 #ifdef OPTION_MASK_EABI
36709 { "eabi", OPTION_MASK_EABI, false, false },
36710 #endif
36711 #ifdef OPTION_MASK_LITTLE_ENDIAN
36712 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36713 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36714 #endif
36715 #ifdef OPTION_MASK_RELOCATABLE
36716 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36717 #endif
36718 #ifdef OPTION_MASK_STRICT_ALIGN
36719 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36720 #endif
36721 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36722 { "string", 0, false, false },
36725 /* Builtin mask mapping for printing the flags. */
36726 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36728 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36729 { "vsx", RS6000_BTM_VSX, false, false },
36730 { "paired", RS6000_BTM_PAIRED, false, false },
36731 { "fre", RS6000_BTM_FRE, false, false },
36732 { "fres", RS6000_BTM_FRES, false, false },
36733 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36734 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36735 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36736 { "cell", RS6000_BTM_CELL, false, false },
36737 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36738 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36739 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36740 { "crypto", RS6000_BTM_CRYPTO, false, false },
36741 { "htm", RS6000_BTM_HTM, false, false },
36742 { "hard-dfp", RS6000_BTM_DFP, false, false },
36743 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36744 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36745 { "float128", RS6000_BTM_FLOAT128, false, false },
36746 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36749 /* Option variables that we want to support inside attribute((target)) and
36750 #pragma GCC target operations. */
36752 struct rs6000_opt_var {
36753 const char *name; /* option name */
36754 size_t global_offset; /* offset of the option in global_options. */
36755 size_t target_offset; /* offset of the option in target options. */
36758 static struct rs6000_opt_var const rs6000_opt_vars[] =
36760 { "friz",
36761 offsetof (struct gcc_options, x_TARGET_FRIZ),
36762 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36763 { "avoid-indexed-addresses",
36764 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36765 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36766 { "paired",
36767 offsetof (struct gcc_options, x_rs6000_paired_float),
36768 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36769 { "longcall",
36770 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36771 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36772 { "optimize-swaps",
36773 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36774 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36775 { "allow-movmisalign",
36776 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36777 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36778 { "sched-groups",
36779 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36780 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36781 { "always-hint",
36782 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36783 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36784 { "align-branch-targets",
36785 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36786 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36787 { "tls-markers",
36788 offsetof (struct gcc_options, x_tls_markers),
36789 offsetof (struct cl_target_option, x_tls_markers), },
36790 { "sched-prolog",
36791 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36792 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36793 { "sched-epilog",
36794 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36795 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36796 { "speculate-indirect-jumps",
36797 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36798 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
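/* These variables accept the same "no-" prefix as the mask options above,
   so for example (illustrative only):

	#pragma GCC target ("no-longcall,avoid-indexed-addresses")

   clears rs6000_default_long_calls and sets TARGET_AVOID_XFORM.  */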
36801 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36802 parsing. Return true if there were no errors. */
36804 static bool
36805 rs6000_inner_target_options (tree args, bool attr_p)
36807 bool ret = true;
36809 if (args == NULL_TREE)
36812 else if (TREE_CODE (args) == STRING_CST)
36814 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36815 char *q;
36817 while ((q = strtok (p, ",")) != NULL)
36819 bool error_p = false;
36820 bool not_valid_p = false;
36821 const char *cpu_opt = NULL;
36823 p = NULL;
36824 if (strncmp (q, "cpu=", 4) == 0)
36826 int cpu_index = rs6000_cpu_name_lookup (q+4);
36827 if (cpu_index >= 0)
36828 rs6000_cpu_index = cpu_index;
36829 else
36831 error_p = true;
36832 cpu_opt = q+4;
36835 else if (strncmp (q, "tune=", 5) == 0)
36837 int tune_index = rs6000_cpu_name_lookup (q+5);
36838 if (tune_index >= 0)
36839 rs6000_tune_index = tune_index;
36840 else
36842 error_p = true;
36843 cpu_opt = q+5;
36846 else
36848 size_t i;
36849 bool invert = false;
36850 char *r = q;
36852 error_p = true;
36853 if (strncmp (r, "no-", 3) == 0)
36855 invert = true;
36856 r += 3;
36859 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36860 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36862 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36864 if (!rs6000_opt_masks[i].valid_target)
36865 not_valid_p = true;
36866 else
36868 error_p = false;
36869 rs6000_isa_flags_explicit |= mask;
36871 /* VSX needs altivec, so -mvsx automagically sets
36872 altivec and disables -mavoid-indexed-addresses. */
36873 if (!invert)
36875 if (mask == OPTION_MASK_VSX)
36877 mask |= OPTION_MASK_ALTIVEC;
36878 TARGET_AVOID_XFORM = 0;
36882 if (rs6000_opt_masks[i].invert)
36883 invert = !invert;
36885 if (invert)
36886 rs6000_isa_flags &= ~mask;
36887 else
36888 rs6000_isa_flags |= mask;
36890 break;
36893 if (error_p && !not_valid_p)
36895 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36896 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36898 size_t j = rs6000_opt_vars[i].global_offset;
36899 *((int *) ((char *)&global_options + j)) = !invert;
36900 error_p = false;
36901 not_valid_p = false;
36902 break;
36907 if (error_p)
36909 const char *eprefix, *esuffix;
36911 ret = false;
36912 if (attr_p)
36914 eprefix = "__attribute__((__target__(";
36915 esuffix = ")))";
36917 else
36919 eprefix = "#pragma GCC target ";
36920 esuffix = "";
36923 if (cpu_opt)
36924 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36925 q, esuffix);
36926 else if (not_valid_p)
36927 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36928 else
36929 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36934 else if (TREE_CODE (args) == TREE_LIST)
36938 tree value = TREE_VALUE (args);
36939 if (value)
36941 bool ret2 = rs6000_inner_target_options (value, attr_p);
36942 if (!ret2)
36943 ret = false;
36945 args = TREE_CHAIN (args);
36947 while (args != NULL_TREE);
36950 else
36952 error ("attribute %<target%> argument not a string");
36953 return false;
36956 return ret;
36959 /* Print out the target options as a list for -mdebug=target. */
36961 static void
36962 rs6000_debug_target_options (tree args, const char *prefix)
36964 if (args == NULL_TREE)
36965 fprintf (stderr, "%s<NULL>", prefix);
36967 else if (TREE_CODE (args) == STRING_CST)
36969 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36970 char *q;
36972 while ((q = strtok (p, ",")) != NULL)
36974 p = NULL;
36975 fprintf (stderr, "%s\"%s\"", prefix, q);
36976 prefix = ", ";
36980 else if (TREE_CODE (args) == TREE_LIST)
36984 tree value = TREE_VALUE (args);
36985 if (value)
36987 rs6000_debug_target_options (value, prefix);
36988 prefix = ", ";
36990 args = TREE_CHAIN (args);
36992 while (args != NULL_TREE);
36995 else
36996 gcc_unreachable ();
36998 return;
37002 /* Hook to validate attribute((target("..."))). */
37004 static bool
37005 rs6000_valid_attribute_p (tree fndecl,
37006 tree ARG_UNUSED (name),
37007 tree args,
37008 int flags)
37010 struct cl_target_option cur_target;
37011 bool ret;
37012 tree old_optimize;
37013 tree new_target, new_optimize;
37014 tree func_optimize;
37016 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
37018 if (TARGET_DEBUG_TARGET)
37020 tree tname = DECL_NAME (fndecl);
37021 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
37022 if (tname)
37023 fprintf (stderr, "function: %.*s\n",
37024 (int) IDENTIFIER_LENGTH (tname),
37025 IDENTIFIER_POINTER (tname));
37026 else
37027 fprintf (stderr, "function: unknown\n");
37029 fprintf (stderr, "args:");
37030 rs6000_debug_target_options (args, " ");
37031 fprintf (stderr, "\n");
37033 if (flags)
37034 fprintf (stderr, "flags: 0x%x\n", flags);
37036 fprintf (stderr, "--------------------\n");
37039 /* attribute((target("default"))) does nothing, beyond
37040 affecting multi-versioning. */
37041 if (TREE_VALUE (args)
37042 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
37043 && TREE_CHAIN (args) == NULL_TREE
37044 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
37045 return true;
37047 old_optimize = build_optimization_node (&global_options);
37048 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
37050 /* If the function changed the optimization levels as well as setting target
37051 options, start with the optimizations specified. */
37052 if (func_optimize && func_optimize != old_optimize)
37053 cl_optimization_restore (&global_options,
37054 TREE_OPTIMIZATION (func_optimize));
37056 /* The target attributes may also change some optimization flags, so update
37057 the optimization options if necessary. */
37058 cl_target_option_save (&cur_target, &global_options);
37059 rs6000_cpu_index = rs6000_tune_index = -1;
37060 ret = rs6000_inner_target_options (args, true);
37062 /* Set up any additional state. */
37063 if (ret)
37065 ret = rs6000_option_override_internal (false);
37066 new_target = build_target_option_node (&global_options);
37068 else
37069 new_target = NULL;
37071 new_optimize = build_optimization_node (&global_options);
37073 if (!new_target)
37074 ret = false;
37076 else if (fndecl)
37078 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
37080 if (old_optimize != new_optimize)
37081 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
37084 cl_target_option_restore (&global_options, &cur_target);
37086 if (old_optimize != new_optimize)
37087 cl_optimization_restore (&global_options,
37088 TREE_OPTIMIZATION (old_optimize));
37090 return ret;
37094 /* Hook to validate the current #pragma GCC target and set the state, and
37095 update the macros based on what was changed. If ARGS is NULL, then
37096 POP_TARGET is used to reset the options. */
37098 bool
37099 rs6000_pragma_target_parse (tree args, tree pop_target)
37101 tree prev_tree = build_target_option_node (&global_options);
37102 tree cur_tree;
37103 struct cl_target_option *prev_opt, *cur_opt;
37104 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
37105 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
37107 if (TARGET_DEBUG_TARGET)
37109 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
37110 fprintf (stderr, "args:");
37111 rs6000_debug_target_options (args, " ");
37112 fprintf (stderr, "\n");
37114 if (pop_target)
37116 fprintf (stderr, "pop_target:\n");
37117 debug_tree (pop_target);
37119 else
37120 fprintf (stderr, "pop_target: <NULL>\n");
37122 fprintf (stderr, "--------------------\n");
37125 if (! args)
37127 cur_tree = ((pop_target)
37128 ? pop_target
37129 : target_option_default_node);
37130 cl_target_option_restore (&global_options,
37131 TREE_TARGET_OPTION (cur_tree));
37133 else
37135 rs6000_cpu_index = rs6000_tune_index = -1;
37136 if (!rs6000_inner_target_options (args, false)
37137 || !rs6000_option_override_internal (false)
37138 || (cur_tree = build_target_option_node (&global_options))
37139 == NULL_TREE)
37141 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
37142 fprintf (stderr, "invalid pragma\n");
37144 return false;
37148 target_option_current_node = cur_tree;
37149 rs6000_activate_target_options (target_option_current_node);
37151 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
37152 change the macros that are defined. */
37153 if (rs6000_target_modify_macros_ptr)
37155 prev_opt = TREE_TARGET_OPTION (prev_tree);
37156 prev_bumask = prev_opt->x_rs6000_builtin_mask;
37157 prev_flags = prev_opt->x_rs6000_isa_flags;
37159 cur_opt = TREE_TARGET_OPTION (cur_tree);
37160 cur_flags = cur_opt->x_rs6000_isa_flags;
37161 cur_bumask = cur_opt->x_rs6000_builtin_mask;
37163 diff_bumask = (prev_bumask ^ cur_bumask);
37164 diff_flags = (prev_flags ^ cur_flags);
37166 if ((diff_flags != 0) || (diff_bumask != 0))
37168 /* Delete old macros. */
37169 rs6000_target_modify_macros_ptr (false,
37170 prev_flags & diff_flags,
37171 prev_bumask & diff_bumask);
37173 /* Define new macros. */
37174 rs6000_target_modify_macros_ptr (true,
37175 cur_flags & diff_flags,
37176 cur_bumask & diff_bumask);
37180 return true;
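/* A typical use of this hook from C code is (illustrative only):

	#pragma GCC push_options
	#pragma GCC target ("cpu=power9,vsx")
	vector double vd_add (vector double a, vector double b)
	{
	  return a + b;
	}
	#pragma GCC pop_options

   The pop causes this function to be re-entered with ARGS == NULL and
   POP_TARGET set to the previously pushed options node.  */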
37184 /* Remember the last target of rs6000_set_current_function. */
37185 static GTY(()) tree rs6000_previous_fndecl;
37187 /* Restore target's globals from NEW_TREE and invalidate the
37188 rs6000_previous_fndecl cache. */
37190 void
37191 rs6000_activate_target_options (tree new_tree)
37193 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37194 if (TREE_TARGET_GLOBALS (new_tree))
37195 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37196 else if (new_tree == target_option_default_node)
37197 restore_target_globals (&default_target_globals);
37198 else
37199 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37200 rs6000_previous_fndecl = NULL_TREE;
37203 /* Establish appropriate back-end context for processing the function
37204 FNDECL. The argument might be NULL to indicate processing at top
37205 level, outside of any function scope. */
37206 static void
37207 rs6000_set_current_function (tree fndecl)
37209 if (TARGET_DEBUG_TARGET)
37211 fprintf (stderr, "\n==================== rs6000_set_current_function");
37213 if (fndecl)
37214 fprintf (stderr, ", fndecl %s (%p)",
37215 (DECL_NAME (fndecl)
37216 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37217 : "<unknown>"), (void *)fndecl);
37219 if (rs6000_previous_fndecl)
37220 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37222 fprintf (stderr, "\n");
37225 /* Only change the context if the function changes. This hook is called
37226 several times in the course of compiling a function, and we don't want to
37227 slow things down too much or call target_reinit when it isn't safe. */
37228 if (fndecl == rs6000_previous_fndecl)
37229 return;
37231 tree old_tree;
37232 if (rs6000_previous_fndecl == NULL_TREE)
37233 old_tree = target_option_current_node;
37234 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37235 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37236 else
37237 old_tree = target_option_default_node;
37239 tree new_tree;
37240 if (fndecl == NULL_TREE)
37242 if (old_tree != target_option_current_node)
37243 new_tree = target_option_current_node;
37244 else
37245 new_tree = NULL_TREE;
37247 else
37249 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37250 if (new_tree == NULL_TREE)
37251 new_tree = target_option_default_node;
37254 if (TARGET_DEBUG_TARGET)
37256 if (new_tree)
37258 fprintf (stderr, "\nnew fndecl target specific options:\n");
37259 debug_tree (new_tree);
37262 if (old_tree)
37264 fprintf (stderr, "\nold fndecl target specific options:\n");
37265 debug_tree (old_tree);
37268 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37269 fprintf (stderr, "--------------------\n");
37272 if (new_tree && old_tree != new_tree)
37273 rs6000_activate_target_options (new_tree);
37275 if (fndecl)
37276 rs6000_previous_fndecl = fndecl;
37280 /* Save the current options */
37282 static void
37283 rs6000_function_specific_save (struct cl_target_option *ptr,
37284 struct gcc_options *opts)
37286 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37287 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37290 /* Restore the current options */
37292 static void
37293 rs6000_function_specific_restore (struct gcc_options *opts,
37294 struct cl_target_option *ptr)
37297 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37298 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37299 (void) rs6000_option_override_internal (false);
37302 /* Print the current options */
37304 static void
37305 rs6000_function_specific_print (FILE *file, int indent,
37306 struct cl_target_option *ptr)
37308 rs6000_print_isa_options (file, indent, "Isa options set",
37309 ptr->x_rs6000_isa_flags);
37311 rs6000_print_isa_options (file, indent, "Isa options explicit",
37312 ptr->x_rs6000_isa_flags_explicit);
37315 /* Helper function to print the current isa or misc options on a line. */
37317 static void
37318 rs6000_print_options_internal (FILE *file,
37319 int indent,
37320 const char *string,
37321 HOST_WIDE_INT flags,
37322 const char *prefix,
37323 const struct rs6000_opt_mask *opts,
37324 size_t num_elements)
37326 size_t i;
37327 size_t start_column = 0;
37328 size_t cur_column;
37329 size_t max_column = 120;
37330 size_t prefix_len = strlen (prefix);
37331 size_t comma_len = 0;
37332 const char *comma = "";
37334 if (indent)
37335 start_column += fprintf (file, "%*s", indent, "");
37337 if (!flags)
37339 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
37340 return;
37343 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
37345 /* Print the various mask options. */
37346 cur_column = start_column;
37347 for (i = 0; i < num_elements; i++)
37349 bool invert = opts[i].invert;
37350 const char *name = opts[i].name;
37351 const char *no_str = "";
37352 HOST_WIDE_INT mask = opts[i].mask;
37353 size_t len = comma_len + prefix_len + strlen (name);
37355 if (!invert)
37357 if ((flags & mask) == 0)
37359 no_str = "no-";
37360 len += sizeof ("no-") - 1;
37363 flags &= ~mask;
37366 else
37368 if ((flags & mask) != 0)
37370 no_str = "no-";
37371 len += sizeof ("no-") - 1;
37374 flags |= mask;
37377 cur_column += len;
37378 if (cur_column > max_column)
37380 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
37381 cur_column = start_column + len;
37382 comma = "";
37385 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37386 comma = ", ";
37387 comma_len = sizeof (", ") - 1;
37390 fputs ("\n", file);
37393 /* Helper function to print the current isa options on a line. */
37395 static void
37396 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37397 HOST_WIDE_INT flags)
37399 rs6000_print_options_internal (file, indent, string, flags, "-m",
37400 &rs6000_opt_masks[0],
37401 ARRAY_SIZE (rs6000_opt_masks));
37404 static void
37405 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37406 HOST_WIDE_INT flags)
37408 rs6000_print_options_internal (file, indent, string, flags, "",
37409 &rs6000_builtin_mask_names[0],
37410 ARRAY_SIZE (rs6000_builtin_mask_names));
37413 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37414 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37415 -mupper-regs-df, etc.).
37417 If the user used -mno-power8-vector, we need to turn off all of the implicit
37418 ISA 2.07 and 3.0 options that relate to the vector unit.
37420 If the user used -mno-power9-vector, we need to turn off all of the implicit
37421 ISA 3.0 options that relate to the vector unit.
37423 This function does not handle explicit options such as the user specifying
37424 -mdirect-move. These are handled in rs6000_option_override_internal, and
37425 the appropriate error is given if needed.
37427 We return a mask of all of the implicit options that should not be enabled
37428 by default. */
37430 static HOST_WIDE_INT
37431 rs6000_disable_incompatible_switches (void)
37433 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37434 size_t i, j;
37436 static const struct {
37437 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37438 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37439 const char *const name; /* name of the switch. */
37440 } flags[] = {
37441 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37442 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37443 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37446 for (i = 0; i < ARRAY_SIZE (flags); i++)
37448 HOST_WIDE_INT no_flag = flags[i].no_flag;
37450 if ((rs6000_isa_flags & no_flag) == 0
37451 && (rs6000_isa_flags_explicit & no_flag) != 0)
37453 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37454 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37455 & rs6000_isa_flags
37456 & dep_flags);
37458 if (set_flags)
37460 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37461 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37463 set_flags &= ~rs6000_opt_masks[j].mask;
37464 error ("%<-mno-%s%> turns off %<-m%s%>",
37465 flags[i].name,
37466 rs6000_opt_masks[j].name);
37469 gcc_assert (!set_flags);
37472 rs6000_isa_flags &= ~dep_flags;
37473 ignore_masks |= no_flag | dep_flags;
37477 return ignore_masks;
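/* For example, compiling with -mno-vsx -mpower8-vector produces

	error: '-mno-vsx' turns off '-mpower8-vector'

   assuming OTHER_VSX_VECTOR_MASKS includes OPTION_MASK_P8_VECTOR, while
   -mno-vsx alone silently drops all of the dependent flags.  */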
37481 /* Helper function for printing the function name when debugging. */
37483 static const char *
37484 get_decl_name (tree fn)
37486 tree name;
37488 if (!fn)
37489 return "<null>";
37491 name = DECL_NAME (fn);
37492 if (!name)
37493 return "<no-name>";
37495 return IDENTIFIER_POINTER (name);
37498 /* Return the clone id of the target we are compiling code for in a target
37499 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37500 the priority list for the target clones (ordered from lowest to
37501 highest). */
37503 static int
37504 rs6000_clone_priority (tree fndecl)
37506 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37507 HOST_WIDE_INT isa_masks;
37508 int ret = CLONE_DEFAULT;
37509 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37510 const char *attrs_str = NULL;
37512 attrs = TREE_VALUE (TREE_VALUE (attrs));
37513 attrs_str = TREE_STRING_POINTER (attrs);
37515 /* Return priority zero for default function. Return the ISA needed for the
37516 function if it is not the default. */
37517 if (strcmp (attrs_str, "default") != 0)
37519 if (fn_opts == NULL_TREE)
37520 fn_opts = target_option_default_node;
37522 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37523 isa_masks = rs6000_isa_flags;
37524 else
37525 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37527 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37528 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37529 break;
37532 if (TARGET_DEBUG_TARGET)
37533 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37534 get_decl_name (fndecl), ret);
37536 return ret;
37539 /* This compares the priority of target features in function DECL1 and DECL2.
37540 It returns positive value if DECL1 is higher priority, negative value if
37541 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37542 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37544 static int
37545 rs6000_compare_version_priority (tree decl1, tree decl2)
37547 int priority1 = rs6000_clone_priority (decl1);
37548 int priority2 = rs6000_clone_priority (decl2);
37549 int ret = priority1 - priority2;
37551 if (TARGET_DEBUG_TARGET)
37552 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37553 get_decl_name (decl1), get_decl_name (decl2), ret);
37555 return ret;
37558 /* Make a dispatcher declaration for the multi-versioned function DECL.
37559 Calls to DECL function will be replaced with calls to the dispatcher
37560 by the front-end. Returns the decl of the dispatcher function. */
37562 static tree
37563 rs6000_get_function_versions_dispatcher (void *decl)
37565 tree fn = (tree) decl;
37566 struct cgraph_node *node = NULL;
37567 struct cgraph_node *default_node = NULL;
37568 struct cgraph_function_version_info *node_v = NULL;
37569 struct cgraph_function_version_info *first_v = NULL;
37571 tree dispatch_decl = NULL;
37573 struct cgraph_function_version_info *default_version_info = NULL;
37574 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37576 if (TARGET_DEBUG_TARGET)
37577 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37578 get_decl_name (fn));
37580 node = cgraph_node::get (fn);
37581 gcc_assert (node != NULL);
37583 node_v = node->function_version ();
37584 gcc_assert (node_v != NULL);
37586 if (node_v->dispatcher_resolver != NULL)
37587 return node_v->dispatcher_resolver;
37589 /* Find the default version and make it the first node. */
37590 first_v = node_v;
37591 /* Go to the beginning of the chain. */
37592 while (first_v->prev != NULL)
37593 first_v = first_v->prev;
37595 default_version_info = first_v;
37596 while (default_version_info != NULL)
37598 const tree decl2 = default_version_info->this_node->decl;
37599 if (is_function_default_version (decl2))
37600 break;
37601 default_version_info = default_version_info->next;
37604 /* If there is no default node, just return NULL. */
37605 if (default_version_info == NULL)
37606 return NULL;
37608 /* Make default info the first node. */
37609 if (first_v != default_version_info)
37611 default_version_info->prev->next = default_version_info->next;
37612 if (default_version_info->next)
37613 default_version_info->next->prev = default_version_info->prev;
37614 first_v->prev = default_version_info;
37615 default_version_info->next = first_v;
37616 default_version_info->prev = NULL;
37619 default_node = default_version_info->this_node;
37621 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37622 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37623 "target_clones attribute needs GLIBC (2.23 and newer) that "
37624 "exports hardware capability bits");
37625 #else
37627 if (targetm.has_ifunc_p ())
37629 struct cgraph_function_version_info *it_v = NULL;
37630 struct cgraph_node *dispatcher_node = NULL;
37631 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37633 /* Right now, the dispatching is done via ifunc. */
37634 dispatch_decl = make_dispatcher_decl (default_node->decl);
37636 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37637 gcc_assert (dispatcher_node != NULL);
37638 dispatcher_node->dispatcher_function = 1;
37639 dispatcher_version_info
37640 = dispatcher_node->insert_new_function_version ();
37641 dispatcher_version_info->next = default_version_info;
37642 dispatcher_node->definition = 1;
37644 /* Set the dispatcher for all the versions. */
37645 it_v = default_version_info;
37646 while (it_v != NULL)
37648 it_v->dispatcher_resolver = dispatch_decl;
37649 it_v = it_v->next;
37652 else
37654 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37655 "multiversioning needs ifunc which is not supported "
37656 "on this target");
37658 #endif
37660 return dispatch_decl;
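/* This hook is reached when the front end sees a declaration such as
   (illustrative only; "cpu=power9" assumes a corresponding entry in
   rs6000_clone_map):

	__attribute__((target_clones("cpu=power9,default")))
	long mod3 (long a) { return a % 3; }

   Each version becomes a cgraph function-version node, and the single
   ifunc-based dispatcher created here is what callers actually
   reference.  */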
37663 /* Make the resolver function decl to dispatch the versions of a multi-
37664 versioned function, DEFAULT_DECL. Create an empty basic block in the
37665 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37666 function. */
37668 static tree
37669 make_resolver_func (const tree default_decl,
37670 const tree dispatch_decl,
37671 basic_block *empty_bb)
37673 /* Make the resolver function static. The resolver function returns
37674 void *. */
37675 tree decl_name = clone_function_name (default_decl, "resolver");
37676 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37677 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37678 tree decl = build_fn_decl (resolver_name, type);
37679 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37681 DECL_NAME (decl) = decl_name;
37682 TREE_USED (decl) = 1;
37683 DECL_ARTIFICIAL (decl) = 1;
37684 DECL_IGNORED_P (decl) = 0;
37685 TREE_PUBLIC (decl) = 0;
37686 DECL_UNINLINABLE (decl) = 1;
37688 /* Resolver is not external, body is generated. */
37689 DECL_EXTERNAL (decl) = 0;
37690 DECL_EXTERNAL (dispatch_decl) = 0;
37692 DECL_CONTEXT (decl) = NULL_TREE;
37693 DECL_INITIAL (decl) = make_node (BLOCK);
37694 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37696 /* Build result decl and add to function_decl. */
37697 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37698 DECL_ARTIFICIAL (t) = 1;
37699 DECL_IGNORED_P (t) = 1;
37700 DECL_RESULT (decl) = t;
37702 gimplify_function_tree (decl);
37703 push_cfun (DECL_STRUCT_FUNCTION (decl));
37704 *empty_bb = init_lowered_empty_function (decl, false,
37705 profile_count::uninitialized ());
37707 cgraph_node::add_new_function (decl, true);
37708 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37710 pop_cfun ();
37712 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37713 DECL_ATTRIBUTES (dispatch_decl)
37714 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37716 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37718 return decl;
37721 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37722 return a pointer to VERSION_DECL if we are running on a machine that
37723 supports the index CLONE_ISA hardware architecture bits. This function will
37724 be called during version dispatch to decide which function version to
37725 execute. It returns the basic block at the end, to which more conditions
37726 can be added. */
37728 static basic_block
37729 add_condition_to_bb (tree function_decl, tree version_decl,
37730 int clone_isa, basic_block new_bb)
37732 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37734 gcc_assert (new_bb != NULL);
37735 gimple_seq gseq = bb_seq (new_bb);
37738 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37739 build_fold_addr_expr (version_decl));
37740 tree result_var = create_tmp_var (ptr_type_node);
37741 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37742 gimple *return_stmt = gimple_build_return (result_var);
37744 if (clone_isa == CLONE_DEFAULT)
37746 gimple_seq_add_stmt (&gseq, convert_stmt);
37747 gimple_seq_add_stmt (&gseq, return_stmt);
37748 set_bb_seq (new_bb, gseq);
37749 gimple_set_bb (convert_stmt, new_bb);
37750 gimple_set_bb (return_stmt, new_bb);
37751 pop_cfun ();
37752 return new_bb;
37755 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37756 tree cond_var = create_tmp_var (bool_int_type_node);
37757 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37758 const char *arg_str = rs6000_clone_map[clone_isa].name;
37759 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37760 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37761 gimple_call_set_lhs (call_cond_stmt, cond_var);
37763 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37764 gimple_set_bb (call_cond_stmt, new_bb);
37765 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37767 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37768 NULL_TREE, NULL_TREE);
37769 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37770 gimple_set_bb (if_else_stmt, new_bb);
37771 gimple_seq_add_stmt (&gseq, if_else_stmt);
37773 gimple_seq_add_stmt (&gseq, convert_stmt);
37774 gimple_seq_add_stmt (&gseq, return_stmt);
37775 set_bb_seq (new_bb, gseq);
37777 basic_block bb1 = new_bb;
37778 edge e12 = split_block (bb1, if_else_stmt);
37779 basic_block bb2 = e12->dest;
37780 e12->flags &= ~EDGE_FALLTHRU;
37781 e12->flags |= EDGE_TRUE_VALUE;
37783 edge e23 = split_block (bb2, return_stmt);
37784 gimple_set_bb (convert_stmt, bb2);
37785 gimple_set_bb (return_stmt, bb2);
37787 basic_block bb3 = e23->dest;
37788 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37790 remove_edge (e23);
37791 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37793 pop_cfun ();
37794 return bb3;
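/* The net effect is a resolver body equivalent to this C sketch, where
   the feature string comes from rs6000_clone_map (the names shown here
   are only examples):

	if (__builtin_cpu_supports ("arch_3_00"))
	  return (void *) foo_power9;
	... fall through to the next condition, ending in the default ...  */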
37797 /* This function generates the dispatch function for multi-versioned functions.
37798 DISPATCH_DECL is the function which will contain the dispatch logic.
37799 FNDECLS holds the function choices for dispatch and is passed as a vector.
37800 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37801 code is generated. */
37803 static int
37804 dispatch_function_versions (tree dispatch_decl,
37805 void *fndecls_p,
37806 basic_block *empty_bb)
37808 int ix;
37809 tree ele;
37810 vec<tree> *fndecls;
37811 tree clones[CLONE_MAX];
37813 if (TARGET_DEBUG_TARGET)
37814 fputs ("dispatch_function_versions, top\n", stderr);
37816 gcc_assert (dispatch_decl != NULL
37817 && fndecls_p != NULL
37818 && empty_bb != NULL);
37820 /* fndecls_p is actually a vector. */
37821 fndecls = static_cast<vec<tree> *> (fndecls_p);
37823 /* At least one more version other than the default. */
37824 gcc_assert (fndecls->length () >= 2);
37826 /* The first version in the vector is the default decl. */
37827 memset ((void *) clones, '\0', sizeof (clones));
37828 clones[CLONE_DEFAULT] = (*fndecls)[0];
37830 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37831 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37832 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37833 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37834 to insert the code here to do the call. */
37836 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37838 int priority = rs6000_clone_priority (ele);
37839 if (!clones[priority])
37840 clones[priority] = ele;
37843 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37844 if (clones[ix])
37846 if (TARGET_DEBUG_TARGET)
37847 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37848 ix, get_decl_name (clones[ix]));
37850 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37851 *empty_bb);
37854 return 0;
37857 /* Generate the dispatching code body to dispatch multi-versioned function
37858 DECL. The target hook is called to process the "target" attributes and
37859 provide the code to dispatch the right function at run-time. NODE points
37860 to the dispatcher decl whose body will be created. */
37862 static tree
37863 rs6000_generate_version_dispatcher_body (void *node_p)
37865 tree resolver;
37866 basic_block empty_bb;
37867 struct cgraph_node *node = (cgraph_node *) node_p;
37868 struct cgraph_function_version_info *ninfo = node->function_version ();
37870 if (ninfo->dispatcher_resolver)
37871 return ninfo->dispatcher_resolver;
37873 /* node is going to be an alias, so remove the finalized bit. */
37874 node->definition = false;
37876 /* The first version in the chain corresponds to the default version. */
37877 ninfo->dispatcher_resolver = resolver
37878 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37880 if (TARGET_DEBUG_TARGET)
37881 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37882 get_decl_name (resolver));
37884 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37885 auto_vec<tree, 2> fn_ver_vec;
37887 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37888 vinfo;
37889 vinfo = vinfo->next)
37891 struct cgraph_node *version = vinfo->this_node;
37892 /* Check for virtual functions here again, as by this time it should
37893 have been determined if this function needs a vtable index or
37894 not. This happens for methods in derived classes that override
37895 virtual methods in base classes but are not explicitly marked as
37896 virtual. */
37897 if (DECL_VINDEX (version->decl))
37898 sorry ("Virtual function multiversioning not supported");
37900 fn_ver_vec.safe_push (version->decl);
37903 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37904 cgraph_edge::rebuild_edges ();
37905 pop_cfun ();
37906 return resolver;
37910 /* Hook to determine if one function can safely inline another. */
37912 static bool
37913 rs6000_can_inline_p (tree caller, tree callee)
37915 bool ret = false;
37916 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37917 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37919 /* If callee has no option attributes, then it is ok to inline. */
37920 if (!callee_tree)
37921 ret = true;
37923 /* If caller has no option attributes, but callee does then it is not ok to
37924 inline. */
37925 else if (!caller_tree)
37926 ret = false;
37928 else
37930 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37931 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37933 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37934 can inline an altivec function but a non-vsx function can't inline a
37935 vsx function. */
37936 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37937 == callee_opts->x_rs6000_isa_flags)
37938 ret = true;
37941 if (TARGET_DEBUG_TARGET)
37942 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37943 get_decl_name (caller), get_decl_name (callee),
37944 (ret ? "can" : "cannot"));
37946 return ret;
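/* Concretely (illustrative only):

	__attribute__((target("altivec"))) static int callee (void);
	__attribute__((target("vsx"))) int caller (void) { return callee (); }

   CALLER may inline CALLEE because -mvsx implies -maltivec, making the
   callee's ISA flags a subset of the caller's; the reverse direction is
   refused.  */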
37949 /* Allocate a stack temp and fixup the address so it meets the particular
37950 memory requirements (either offsettable or REG+REG addressing). */
37953 rs6000_allocate_stack_temp (machine_mode mode,
37954 bool offsettable_p,
37955 bool reg_reg_p)
37957 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37958 rtx addr = XEXP (stack, 0);
37959 int strict_p = reload_completed;
37961 if (!legitimate_indirect_address_p (addr, strict_p))
37963 if (offsettable_p
37964 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37965 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37967 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37968 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37971 return stack;
37974 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37975 to such a form to deal with memory reference instructions like STFIWX that
37976 only take reg+reg addressing. */
37979 rs6000_address_for_fpconvert (rtx x)
37981 rtx addr;
37983 gcc_assert (MEM_P (x));
37984 addr = XEXP (x, 0);
37985 if (can_create_pseudo_p ()
37986 && ! legitimate_indirect_address_p (addr, reload_completed)
37987 && ! legitimate_indexed_address_p (addr, reload_completed))
37989 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37991 rtx reg = XEXP (addr, 0);
37992 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37993 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37994 gcc_assert (REG_P (reg));
37995 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37996 addr = reg;
37998 else if (GET_CODE (addr) == PRE_MODIFY)
38000 rtx reg = XEXP (addr, 0);
38001 rtx expr = XEXP (addr, 1);
38002 gcc_assert (REG_P (reg));
38003 gcc_assert (GET_CODE (expr) == PLUS);
38004 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
38005 addr = reg;
38008 x = replace_equiv_address (x, copy_addr_to_reg (addr));
38011 return x;
38014 /* Given a memory reference, if it is not in the form for altivec memory
38015 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
38016 convert to the altivec format. */
38019 rs6000_address_for_altivec (rtx x)
38021 gcc_assert (MEM_P (x));
38022 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
38024 rtx addr = XEXP (x, 0);
38026 if (!legitimate_indexed_address_p (addr, reload_completed)
38027 && !legitimate_indirect_address_p (addr, reload_completed))
38028 addr = copy_to_mode_reg (Pmode, addr);
38030 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
38031 x = change_address (x, GET_MODE (x), addr);
38034 return x;
38037 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
38039 On the RS/6000, all integer constants are acceptable, most won't be valid
38040 for particular insns, though. Only easy FP constants are acceptable. */
38042 static bool
38043 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
38045 if (TARGET_ELF && tls_referenced_p (x))
38046 return false;
38048 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
38049 || GET_MODE (x) == VOIDmode
38050 || (TARGET_POWERPC64 && mode == DImode)
38051 || easy_fp_constant (x, mode)
38052 || easy_vector_constant (x, mode));
38056 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
38058 static bool
38059 chain_already_loaded (rtx_insn *last)
38061 for (; last != NULL; last = PREV_INSN (last))
38063 if (NONJUMP_INSN_P (last))
38065 rtx patt = PATTERN (last);
38067 if (GET_CODE (patt) == SET)
38069 rtx lhs = XEXP (patt, 0);
38071 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
38072 return true;
38076 return false;
38079 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
38081 void
38082 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38084 const bool direct_call_p
38085 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
38086 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
38087 rtx toc_load = NULL_RTX;
38088 rtx toc_restore = NULL_RTX;
38089 rtx func_addr;
38090 rtx abi_reg = NULL_RTX;
38091 rtx call[4];
38092 int n_call;
38093 rtx insn;
38095 /* Handle longcall attributes. */
38096 if (INTVAL (cookie) & CALL_LONG)
38097 func_desc = rs6000_longcall_ref (func_desc);
38099 /* Handle indirect calls. */
38100 if (GET_CODE (func_desc) != SYMBOL_REF
38101 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
38103 /* Save the TOC into its reserved slot before the call,
38104 and prepare to restore it after the call. */
38105 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
38106 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
38107 rtx stack_toc_mem = gen_frame_mem (Pmode,
38108 gen_rtx_PLUS (Pmode, stack_ptr,
38109 stack_toc_offset));
38110 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
38111 gen_rtvec (1, stack_toc_offset),
38112 UNSPEC_TOCSLOT);
38113 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
38115 /* Can we optimize saving the TOC in the prologue or
38116 do we need to do it at every call? */
38117 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
38118 cfun->machine->save_toc_in_prologue = true;
38119 else
38121 MEM_VOLATILE_P (stack_toc_mem) = 1;
38122 emit_move_insn (stack_toc_mem, toc_reg);
38125 if (DEFAULT_ABI == ABI_ELFv2)
38127 /* A function pointer in the ELFv2 ABI is just a plain address, but
38128 the ABI requires it to be loaded into r12 before the call. */
38129 func_addr = gen_rtx_REG (Pmode, 12);
38130 emit_move_insn (func_addr, func_desc);
38131 abi_reg = func_addr;
38133 else
38135 /* A function pointer under AIX is a pointer to a data area whose
38136 first word contains the actual address of the function, whose
38137 second word contains a pointer to its TOC, and whose third word
38138 contains a value to place in the static chain register (r11).
38139 Note that if we load the static chain, our "trampoline" need
38140 not have any executable code. */
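/* Viewed as a C structure, the descriptor would be laid out as (a
   sketch with a hypothetical tag, assuming 64-bit Pmode):

	struct aix_func_desc { void *entry; void *toc; void *static_chain; };

   and the loads below index it at offsets 0, GET_MODE_SIZE (Pmode) and
   2 * GET_MODE_SIZE (Pmode).  */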
38142 /* Load up address of the actual function. */
38143 func_desc = force_reg (Pmode, func_desc);
38144 func_addr = gen_reg_rtx (Pmode);
38145 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
38147 /* Prepare to load the TOC of the called function. Note that the
38148 TOC load must happen immediately before the actual call so
38149 that unwinding the TOC registers works correctly. See the
38150 comment in frob_update_context. */
38151 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
38152 rtx func_toc_mem = gen_rtx_MEM (Pmode,
38153 gen_rtx_PLUS (Pmode, func_desc,
38154 func_toc_offset));
38155 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
38157 /* If we have a static chain, load it up. But, if the call was
38158 originally direct, the 3rd word has not been written since no
38159 trampoline has been built, so we ought not to load it, lest we
38160 override a static chain value. */
38161 if (!direct_call_p
38162 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
38163 && !chain_already_loaded (get_current_sequence ()->next->last))
38165 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
38166 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
38167 rtx func_sc_mem = gen_rtx_MEM (Pmode,
38168 gen_rtx_PLUS (Pmode, func_desc,
38169 func_sc_offset));
38170 emit_move_insn (sc_reg, func_sc_mem);
38171 abi_reg = sc_reg;
38175 else
38177 /* Direct calls use the TOC: for local calls, the callee will
38178 assume the TOC register is set; for non-local calls, the
38179 PLT stub needs the TOC register. */
38180 abi_reg = toc_reg;
38181 func_addr = func_desc;
38184 /* Create the call. */
38185 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
38186 if (value != NULL_RTX)
38187 call[0] = gen_rtx_SET (value, call[0]);
38188 n_call = 1;
38190 if (toc_load)
38191 call[n_call++] = toc_load;
38192 if (toc_restore)
38193 call[n_call++] = toc_restore;
38195 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38197 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38198 insn = emit_call_insn (insn);
38200 /* Mention all registers defined by the ABI to hold information
38201 as uses in CALL_INSN_FUNCTION_USAGE. */
38202 if (abi_reg)
38203 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38206 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38208 void
38209 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38211 rtx call[2];
38212 rtx insn;
38214 gcc_assert (INTVAL (cookie) == 0);
38216 /* Create the call. */
38217 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
38218 if (value != NULL_RTX)
38219 call[0] = gen_rtx_SET (value, call[0]);
38221 call[1] = simple_return_rtx;
38223 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38224 insn = emit_call_insn (insn);
38226 /* Note use of the TOC register. */
38227 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38230 /* Return whether we need to always update the saved TOC pointer when we update
38231 the stack pointer. */
38233 static bool
38234 rs6000_save_toc_in_prologue_p (void)
38236 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38239 #ifdef HAVE_GAS_HIDDEN
38240 # define USE_HIDDEN_LINKONCE 1
38241 #else
38242 # define USE_HIDDEN_LINKONCE 0
38243 #endif
38245 /* Fills in the label name that should be used for a 476 link stack thunk. */
38247 void
38248 get_ppc476_thunk_name (char name[32])
38250 gcc_assert (TARGET_LINK_STACK);
38252 if (USE_HIDDEN_LINKONCE)
38253 sprintf (name, "__ppc476.get_thunk");
38254 else
38255 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38258 /* This function emits the simple thunk routine that is used to preserve
38259 the link stack on the 476 cpu. */
38261 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38262 static void
38263 rs6000_code_end (void)
38265 char name[32];
38266 tree decl;
38268 if (!TARGET_LINK_STACK)
38269 return;
38271 get_ppc476_thunk_name (name);
38273 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38274 build_function_type_list (void_type_node, NULL_TREE));
38275 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38276 NULL_TREE, void_type_node);
38277 TREE_PUBLIC (decl) = 1;
38278 TREE_STATIC (decl) = 1;
38280 #if RS6000_WEAK
38281 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38283 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38284 targetm.asm_out.unique_section (decl, 0);
38285 switch_to_section (get_named_section (decl, NULL, 0));
38286 DECL_WEAK (decl) = 1;
38287 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38288 targetm.asm_out.globalize_label (asm_out_file, name);
38289 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38290 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38292 else
38293 #endif
38295 switch_to_section (text_section);
38296 ASM_OUTPUT_LABEL (asm_out_file, name);
38299 DECL_INITIAL (decl) = make_node (BLOCK);
38300 current_function_decl = decl;
38301 allocate_struct_function (decl, false);
38302 init_function_start (decl);
38303 first_function_block_is_cold = false;
38304 /* Make sure unwind info is emitted for the thunk if needed. */
38305 final_start_function (emit_barrier (), asm_out_file, 1);
38307 fputs ("\tblr\n", asm_out_file);
38309 final_end_function ();
38310 init_insn_lengths ();
38311 free_after_compilation (cfun);
38312 set_cfun (NULL);
38313 current_function_decl = NULL;
38316 /* Add r30 to hard reg set if the prologue sets it up and it is not
38317 pic_offset_table_rtx. */
38319 static void
38320 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38322 if (!TARGET_SINGLE_PIC_BASE
38323 && TARGET_TOC
38324 && TARGET_MINIMAL_TOC
38325 && !constant_pool_empty_p ())
38326 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38327 if (cfun->machine->split_stack_argp_used)
38328 add_to_hard_reg_set (&set->set, Pmode, 12);
38330 /* Make sure the hard reg set doesn't include r2, which was possibly added
38331 via PIC_OFFSET_TABLE_REGNUM. */
38332 if (TARGET_TOC)
38333 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38337 /* Helper function for rs6000_split_logical to emit a logical instruction after
38338 splitting the operation into individual GPR registers.
38340 DEST is the destination register.
38341 OP1 and OP2 are the input source registers.
38342 CODE is the base operation (AND, IOR, XOR, NOT).
38343 MODE is the machine mode.
38344 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38345 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38346 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38348 static void
38349 rs6000_split_logical_inner (rtx dest,
38350 rtx op1,
38351 rtx op2,
38352 enum rtx_code code,
38353 machine_mode mode,
38354 bool complement_final_p,
38355 bool complement_op1_p,
38356 bool complement_op2_p)
38358 rtx bool_rtx;
38360 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38361 if (op2 && GET_CODE (op2) == CONST_INT
38362 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38363 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38365 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38366 HOST_WIDE_INT value = INTVAL (op2) & mask;
38368 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38369 if (code == AND)
38371 if (value == 0)
38373 emit_insn (gen_rtx_SET (dest, const0_rtx));
38374 return;
38377 else if (value == mask)
38379 if (!rtx_equal_p (dest, op1))
38380 emit_insn (gen_rtx_SET (dest, op1));
38381 return;
38385 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38386 into separate ORI/ORIS or XORI/XORIS instructions. */
38387 else if (code == IOR || code == XOR)
38389 if (value == 0)
38391 if (!rtx_equal_p (dest, op1))
38392 emit_insn (gen_rtx_SET (dest, op1));
38393 return;
38398 if (code == AND && mode == SImode
38399 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38401 emit_insn (gen_andsi3 (dest, op1, op2));
38402 return;
38405 if (complement_op1_p)
38406 op1 = gen_rtx_NOT (mode, op1);
38408 if (complement_op2_p)
38409 op2 = gen_rtx_NOT (mode, op2);
38411 /* For canonical RTL, if only one arm is inverted it is the first. */
38412 if (!complement_op1_p && complement_op2_p)
38413 std::swap (op1, op2);
38415 bool_rtx = ((code == NOT)
38416 ? gen_rtx_NOT (mode, op1)
38417 : gen_rtx_fmt_ee (code, mode, op1, op2));
38419 if (complement_final_p)
38420 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38422 emit_insn (gen_rtx_SET (dest, bool_rtx));
38425 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38426 operations are split immediately during RTL generation to allow for more
38427 optimizations of the AND/IOR/XOR.
38429 OPERANDS is an array containing the destination and two input operands.
38430 CODE is the base operation (AND, IOR, XOR, NOT).
38432 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38433 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38434 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38438 static void
38439 rs6000_split_logical_di (rtx operands[3],
38440 enum rtx_code code,
38441 bool complement_final_p,
38442 bool complement_op1_p,
38443 bool complement_op2_p)
38445 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38446 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38447 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38448 enum hi_lo { hi = 0, lo = 1 };
38449 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38450 size_t i;
38452 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38453 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38454 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38455 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38457 if (code == NOT)
38458 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38459 else
38461 if (GET_CODE (operands[2]) != CONST_INT)
38463 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38464 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38466 else
38468 HOST_WIDE_INT value = INTVAL (operands[2]);
38469 HOST_WIDE_INT value_hi_lo[2];
38471 gcc_assert (!complement_final_p);
38472 gcc_assert (!complement_op1_p);
38473 gcc_assert (!complement_op2_p);
38475 value_hi_lo[hi] = value >> 32;
38476 value_hi_lo[lo] = value & lower_32bits;
38478 for (i = 0; i < 2; i++)
38480 HOST_WIDE_INT sub_value = value_hi_lo[i];
38482 if (sub_value & sign_bit)
38483 sub_value |= upper_32bits;
38485 op2_hi_lo[i] = GEN_INT (sub_value);
38487 /* If this is an AND instruction, check to see if we need to load
38488 the value into a register. */
38489 if (code == AND && sub_value != -1 && sub_value != 0
38490 && !and_operand (op2_hi_lo[i], SImode))
38491 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38496 for (i = 0; i < 2; i++)
38498 /* Split large IOR/XOR operations. */
38499 if ((code == IOR || code == XOR)
38500 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38501 && !complement_final_p
38502 && !complement_op1_p
38503 && !complement_op2_p
38504 && !logical_const_operand (op2_hi_lo[i], SImode))
38506 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38507 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38508 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38509 rtx tmp = gen_reg_rtx (SImode);
38511 /* Make sure the constant is sign extended. */
38512 if ((hi_16bits & sign_bit) != 0)
38513 hi_16bits |= upper_32bits;
38515 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38516 code, SImode, false, false, false);
38518 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38519 code, SImode, false, false, false);
38521 else
38522 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38523 code, SImode, complement_final_p,
38524 complement_op1_p, complement_op2_p);
38527 return;
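/* A worked example (an editorial addition, not from the original source):
   on a 32-bit target, (ior:DI r30 (const_int 0x12345678)) is handled as
   two SImode halves.  The high half ORs in 0, which is a simple move (or
   nothing at all), while the low constant 0x12345678 fails
   logical_const_operand and is split into an ORIS of the upper 16 bits
   (0x1234) followed by an ORI of the lower 16 bits (0x5678).  */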
38530 /* Split the insns that make up boolean operations operating on multiple GPR
38531 registers. The boolean MD patterns ensure that the inputs either are
38532 exactly the same as the output registers, or there is no overlap.
38534 OPERANDS is an array containing the destination and two input operands.
38535 CODE is the base operation (AND, IOR, XOR, NOT).
38536 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38537 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38538 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38540 void
38541 rs6000_split_logical (rtx operands[3],
38542 enum rtx_code code,
38543 bool complement_final_p,
38544 bool complement_op1_p,
38545 bool complement_op2_p)
38547 machine_mode mode = GET_MODE (operands[0]);
38548 machine_mode sub_mode;
38549 rtx op0, op1, op2;
38550 int sub_size, regno0, regno1, nregs, i;
38552 /* If this is DImode, use the specialized version that can run before
38553 register allocation. */
38554 if (mode == DImode && !TARGET_POWERPC64)
38556 rs6000_split_logical_di (operands, code, complement_final_p,
38557 complement_op1_p, complement_op2_p);
38558 return;
38561 op0 = operands[0];
38562 op1 = operands[1];
38563 op2 = (code == NOT) ? NULL_RTX : operands[2];
38564 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38565 sub_size = GET_MODE_SIZE (sub_mode);
38566 regno0 = REGNO (op0);
38567 regno1 = REGNO (op1);
38569 gcc_assert (reload_completed);
38570 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38571 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38573 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38574 gcc_assert (nregs > 1);
38576 if (op2 && REG_P (op2))
38577 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38579 for (i = 0; i < nregs; i++)
38581 int offset = i * sub_size;
38582 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38583 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38584 rtx sub_op2 = ((code == NOT)
38585 ? NULL_RTX
38586 : simplify_subreg (sub_mode, op2, mode, offset));
38588 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38589 complement_final_p, complement_op1_p,
38590 complement_op2_p);
38593 return;
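/* Illustrative example (an editorial addition, not from the original
   source): after reload on a 64-bit target, a TImode XOR living in GPR
   pairs, say r10:r11 ^= r12:r13, has nregs == 2 and sub_mode == DImode,
   so the loop above emits two DImode operations:
       xor 10,10,12
       xor 11,11,13  */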
38597 /* Return true if the peephole2 can combine a load involving a combination of
38598 an addis instruction and a load with an offset that can be fused together on
38599 a power8. */
38601 bool
38602 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38603 rtx addis_value, /* addis value. */
38604 rtx target, /* target register that is loaded. */
38605 rtx mem) /* bottom part of the memory addr. */
38607 rtx addr;
38608 rtx base_reg;
38610 /* Validate arguments. */
38611 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38612 return false;
38614 if (!base_reg_operand (target, GET_MODE (target)))
38615 return false;
38617 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38618 return false;
38620 /* Allow sign/zero extension. */
38621 if (GET_CODE (mem) == ZERO_EXTEND
38622 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38623 mem = XEXP (mem, 0);
38625 if (!MEM_P (mem))
38626 return false;
38628 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38629 return false;
38631 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38632 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38633 return false;
38635 /* Validate that the register used to load the high value is either the
38636 register being loaded, or we can safely replace its use.
38638 This function is only called from the peephole2 pass. Since the peephole
38639 contains exactly 2 instructions (the addis and the load), we check that
38640 the target register is not used in the memory address and that the
38641 register holding the addis result is dead after the peephole. */
38642 if (REGNO (addis_reg) != REGNO (target))
38644 if (reg_mentioned_p (target, mem))
38645 return false;
38647 if (!peep2_reg_dead_p (2, addis_reg))
38648 return false;
38650 /* If the target register being loaded is the stack pointer, we must
38651 avoid loading any other value into it, even temporarily. */
38652 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38653 return false;
38656 base_reg = XEXP (addr, 0);
38657 return REGNO (addis_reg) == REGNO (base_reg);
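/* Example of a two-instruction peephole window this predicate accepts
   (an editorial addition, not from the original source):
       addis 9,2,sym@toc@ha      <- addis_reg = r9
       lwz   9,sym@toc@l(9)      <- target = r9, mem based on r9
   When target differs from addis_reg, the addis register must not
   appear in the memory address and must be dead after the load.  */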
38660 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38661 sequence. We adjust the addis register to use the target register. If the
38662 load sign extends, we adjust the code to do the zero extending load, and an
38663 explicit sign extension later since the fusion only covers zero extending
38664 loads.
38666 The operands are:
38667 operands[0] register set with addis (to be replaced with target)
38668 operands[1] value set via addis
38669 operands[2] target register being loaded
38670 operands[3] D-form memory reference using operands[0]. */
38672 void
38673 expand_fusion_gpr_load (rtx *operands)
38675 rtx addis_value = operands[1];
38676 rtx target = operands[2];
38677 rtx orig_mem = operands[3];
38678 rtx new_addr, new_mem, orig_addr, offset;
38679 enum rtx_code plus_or_lo_sum;
38680 machine_mode target_mode = GET_MODE (target);
38681 machine_mode extend_mode = target_mode;
38682 machine_mode ptr_mode = Pmode;
38683 enum rtx_code extend = UNKNOWN;
38685 if (GET_CODE (orig_mem) == ZERO_EXTEND
38686 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38688 extend = GET_CODE (orig_mem);
38689 orig_mem = XEXP (orig_mem, 0);
38690 target_mode = GET_MODE (orig_mem);
38693 gcc_assert (MEM_P (orig_mem));
38695 orig_addr = XEXP (orig_mem, 0);
38696 plus_or_lo_sum = GET_CODE (orig_addr);
38697 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38699 offset = XEXP (orig_addr, 1);
38700 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38701 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38703 if (extend != UNKNOWN)
38704 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38706 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38707 UNSPEC_FUSION_GPR);
38708 emit_insn (gen_rtx_SET (target, new_mem));
38710 if (extend == SIGN_EXTEND)
38712 int sub_off = ((BYTES_BIG_ENDIAN)
38713 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38714 : 0);
38715 rtx sign_reg
38716 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38718 emit_insn (gen_rtx_SET (target,
38719 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38722 return;
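/* Sketch of the sign-extending case (an editorial addition based on the
   comment above): a candidate "addis; lha" pair is rewritten as a fused
   zero-extending load into TARGET followed by a separate sign_extend of
   the loaded subword (e.g. an extsh), since power8 fusion only covers
   zero-extending loads.  */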
38725 /* Emit the addis instruction that will be part of a fused instruction
38726 sequence. */
38728 void
38729 emit_fusion_addis (rtx target, rtx addis_value)
38731 rtx fuse_ops[10];
38732 const char *addis_str = NULL;
38734 /* Emit the addis instruction. */
38735 fuse_ops[0] = target;
38736 if (satisfies_constraint_L (addis_value))
38738 fuse_ops[1] = addis_value;
38739 addis_str = "lis %0,%v1";
38742 else if (GET_CODE (addis_value) == PLUS)
38744 rtx op0 = XEXP (addis_value, 0);
38745 rtx op1 = XEXP (addis_value, 1);
38747 if (REG_P (op0) && CONST_INT_P (op1)
38748 && satisfies_constraint_L (op1))
38750 fuse_ops[1] = op0;
38751 fuse_ops[2] = op1;
38752 addis_str = "addis %0,%1,%v2";
38756 else if (GET_CODE (addis_value) == HIGH)
38758 rtx value = XEXP (addis_value, 0);
38759 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38761 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38762 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38763 if (TARGET_ELF)
38764 addis_str = "addis %0,%2,%1@toc@ha";
38766 else if (TARGET_XCOFF)
38767 addis_str = "addis %0,%1@u(%2)";
38769 else
38770 gcc_unreachable ();
38773 else if (GET_CODE (value) == PLUS)
38775 rtx op0 = XEXP (value, 0);
38776 rtx op1 = XEXP (value, 1);
38778 if (GET_CODE (op0) == UNSPEC
38779 && XINT (op0, 1) == UNSPEC_TOCREL
38780 && CONST_INT_P (op1))
38782 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38783 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38784 fuse_ops[3] = op1;
38785 if (TARGET_ELF)
38786 addis_str = "addis %0,%2,%1+%3@toc@ha";
38788 else if (TARGET_XCOFF)
38789 addis_str = "addis %0,%1+%3@u(%2)";
38791 else
38792 gcc_unreachable ();
38796 else if (satisfies_constraint_L (value))
38798 fuse_ops[1] = value;
38799 addis_str = "lis %0,%v1";
38802 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38804 fuse_ops[1] = value;
38805 addis_str = "lis %0,%1@ha";
38809 if (!addis_str)
38810 fatal_insn ("Could not generate addis value for fusion", addis_value);
38812 output_asm_insn (addis_str, fuse_ops);
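/* Illustrative mapping of addis_value forms to the templates chosen above
   (an editorial addition, not from the original source):
     (const_int 0x12340000)                     -> "lis %0,%v1"
     (plus (reg) (const_int 0x10000))           -> "addis %0,%1,%v2"
     (high (unspec [(sym) (r2)] UNSPEC_TOCREL)) -> "addis %0,%2,%1@toc@ha"
   where the last form is the ELF spelling; XCOFF uses "%1@u(%2)".  */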
38815 /* Emit a D-form load or store instruction that is the second instruction
38816 of a fusion sequence. */
38818 void
38819 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38820 const char *insn_str)
38822 rtx fuse_ops[10];
38823 char insn_template[80];
38825 fuse_ops[0] = load_store_reg;
38826 fuse_ops[1] = addis_reg;
38828 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38830 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38831 fuse_ops[2] = offset;
38832 output_asm_insn (insn_template, fuse_ops);
38835 else if (GET_CODE (offset) == UNSPEC
38836 && XINT (offset, 1) == UNSPEC_TOCREL)
38838 if (TARGET_ELF)
38839 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38841 else if (TARGET_XCOFF)
38842 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38844 else
38845 gcc_unreachable ();
38847 fuse_ops[2] = XVECEXP (offset, 0, 0);
38848 output_asm_insn (insn_template, fuse_ops);
38851 else if (GET_CODE (offset) == PLUS
38852 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38853 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38854 && CONST_INT_P (XEXP (offset, 1)))
38856 rtx tocrel_unspec = XEXP (offset, 0);
38857 if (TARGET_ELF)
38858 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38860 else if (TARGET_XCOFF)
38861 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38863 else
38864 gcc_unreachable ();
38866 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38867 fuse_ops[3] = XEXP (offset, 1);
38868 output_asm_insn (insn_template, fuse_ops);
38871 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38873 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38875 fuse_ops[2] = offset;
38876 output_asm_insn (insn_template, fuse_ops);
38879 else
38880 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38882 return;
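/* For example (an editorial addition, not from the original source),
   emit_fusion_load_store (r9, r9, <TOC-relative unspec for sym>, "lwz")
   on ELF formats the template "lwz %0,%2@toc@l(%1)" and prints
       lwz 9,sym@toc@l(9)  */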
38885 /* Wrap a TOC address that can be fused to indicate that special fusion
38886 processing is needed. */
38888 static rtx
38889 fusion_wrap_memory_address (rtx old_mem)
38891 rtx old_addr = XEXP (old_mem, 0);
38892 rtvec v = gen_rtvec (1, old_addr);
38893 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38894 return replace_equiv_address_nv (old_mem, new_addr, false);
38897 /* Given an address, convert it into the addis and load offset parts. Addresses
38898 created during the peephole2 process look like:
38899 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38900 (unspec [(...)] UNSPEC_TOCREL))
38902 Addresses created via toc fusion look like:
38903 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS) */
38905 static void
38906 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38908 rtx hi, lo;
38910 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38912 lo = XVECEXP (addr, 0, 0);
38913 hi = gen_rtx_HIGH (Pmode, lo);
38915 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38917 hi = XEXP (addr, 0);
38918 lo = XEXP (addr, 1);
38920 else
38921 gcc_unreachable ();
38923 *p_hi = hi;
38924 *p_lo = lo;
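/* Illustrative splits (an editorial addition, not from the original
   source):
     (lo_sum (high X) Y)  or  (plus X Y)
         -> *P_HI = first operand, *P_LO = second operand
     (unspec [(ADDR)] UNSPEC_FUSION_ADDIS)
         -> *P_LO = ADDR, *P_HI = (high ADDR)  */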
38927 /* Return a string to fuse an addis instruction with a GPR load into the
38928 same register that the addis instruction set up. The address that is used
38929 is the logical address that was formed during peephole2:
38930 (lo_sum (high) (low-part))
38932 Or the address is the TOC address that is wrapped before register allocation:
38933 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38935 The code is complicated, so we call output_asm_insn directly, and just
38936 return "". */
38938 const char *
38939 emit_fusion_gpr_load (rtx target, rtx mem)
38941 rtx addis_value;
38942 rtx addr;
38943 rtx load_offset;
38944 const char *load_str = NULL;
38945 machine_mode mode;
38947 if (GET_CODE (mem) == ZERO_EXTEND)
38948 mem = XEXP (mem, 0);
38950 gcc_assert (REG_P (target) && MEM_P (mem));
38952 addr = XEXP (mem, 0);
38953 fusion_split_address (addr, &addis_value, &load_offset);
38955 /* Now emit the load instruction to the same register. */
38956 mode = GET_MODE (mem);
38957 switch (mode)
38959 case E_QImode:
38960 load_str = "lbz";
38961 break;
38963 case E_HImode:
38964 load_str = "lhz";
38965 break;
38967 case E_SImode:
38968 case E_SFmode:
38969 load_str = "lwz";
38970 break;
38972 case E_DImode:
38973 case E_DFmode:
38974 gcc_assert (TARGET_POWERPC64);
38975 load_str = "ld";
38976 break;
38978 default:
38979 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38982 /* Emit the addis instruction. */
38983 emit_fusion_addis (target, addis_value);
38985 /* Emit the D-form load instruction. */
38986 emit_fusion_load_store (target, target, load_offset, load_str);
38988 return "";
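/* End-to-end sketch (an editorial addition, not from the original
   source): for a fused SImode TOC-relative load this prints the pair
       addis 9,2,sym@toc@ha
       lwz   9,sym@toc@l(9)
   via output_asm_insn and returns "", so the caller's insn template
   contributes no further text.  */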
38992 /* Return true if the peephole2 can combine a load/store involving a
38993 combination of an addis instruction and the memory operation. This form
38994 of fusion was added in ISA 3.0 (power9) hardware.
38996 bool
38997 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38998 rtx addis_value, /* addis value. */
38999 rtx dest, /* destination (memory or register). */
39000 rtx src) /* source (register or memory). */
39002 rtx addr, mem, offset;
39003 machine_mode mode = GET_MODE (src);
39005 /* Validate arguments. */
39006 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
39007 return false;
39009 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
39010 return false;
39012 /* Ignore extend operations that are part of the load. */
39013 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
39014 src = XEXP (src, 0);
39016 /* Test for memory<-register or register<-memory. */
39017 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
39019 if (!MEM_P (dest))
39020 return false;
39022 mem = dest;
39025 else if (MEM_P (src))
39027 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
39028 return false;
39030 mem = src;
39033 else
39034 return false;
39036 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
39037 if (GET_CODE (addr) == PLUS)
39039 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
39040 return false;
39042 return satisfies_constraint_I (XEXP (addr, 1));
39045 else if (GET_CODE (addr) == LO_SUM)
39047 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
39048 return false;
39050 offset = XEXP (addr, 1);
39051 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
39052 return small_toc_ref (offset, GET_MODE (offset));
39054 else if (TARGET_ELF && !TARGET_POWERPC64)
39055 return CONSTANT_P (offset);
39058 return false;
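/* Illustrative power9 fusion candidates (an editorial addition, not from
   the original source):
     register <- memory:  addis 10,2,x@toc@ha  ;  lfd 1,x@toc@l(10)
     memory <- register:  addis 10,1,0x7fff    ;  stw 9,8(10)
   Unlike the power8 GPR form, the loaded/stored register need not be
   the addis base register; the addis register only has to be dead.  */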
39061 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39062 load sequence.
39064 The operands are:
39065 operands[0] register set with addis
39066 operands[1] value set via addis
39067 operands[2] target register being loaded
39068 operands[3] D-form memory reference using operands[0].
39070 This is similar to the fusion introduced with power8, except it applies to
39071 both loads and stores and does not require the result register to be the
39072 same as the base register. At the moment, we only do this if the register
39073 set by the addis is dead.
39075 void
39076 expand_fusion_p9_load (rtx *operands)
39078 rtx tmp_reg = operands[0];
39079 rtx addis_value = operands[1];
39080 rtx target = operands[2];
39081 rtx orig_mem = operands[3];
39082 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
39083 enum rtx_code plus_or_lo_sum;
39084 machine_mode target_mode = GET_MODE (target);
39085 machine_mode extend_mode = target_mode;
39086 machine_mode ptr_mode = Pmode;
39087 enum rtx_code extend = UNKNOWN;
39089 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
39091 extend = GET_CODE (orig_mem);
39092 orig_mem = XEXP (orig_mem, 0);
39093 target_mode = GET_MODE (orig_mem);
39096 gcc_assert (MEM_P (orig_mem));
39098 orig_addr = XEXP (orig_mem, 0);
39099 plus_or_lo_sum = GET_CODE (orig_addr);
39100 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39102 offset = XEXP (orig_addr, 1);
39103 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39104 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39106 if (extend != UNKNOWN)
39107 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
39109 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
39110 UNSPEC_FUSION_P9);
39112 set = gen_rtx_SET (target, new_mem);
39113 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39114 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39115 emit_insn (insn);
39117 return;
39120 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39121 store sequence.
39123 The operands are:
39124 operands[0] register set with addis
39125 operands[1] value set via addis
39126 operands[2] target D-form memory being stored to
39127 operands[3] register being stored
39129 This is similar to the fusion introduced with power8, except it applies to
39130 both loads and stores and does not require the result register to be the
39131 same as the base register. At the moment, we only do this if the register
39132 set by the addis is dead.
39134 void
39135 expand_fusion_p9_store (rtx *operands)
39137 rtx tmp_reg = operands[0];
39138 rtx addis_value = operands[1];
39139 rtx orig_mem = operands[2];
39140 rtx src = operands[3];
39141 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
39142 enum rtx_code plus_or_lo_sum;
39143 machine_mode target_mode = GET_MODE (orig_mem);
39144 machine_mode ptr_mode = Pmode;
39146 gcc_assert (MEM_P (orig_mem));
39148 orig_addr = XEXP (orig_mem, 0);
39149 plus_or_lo_sum = GET_CODE (orig_addr);
39150 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39152 offset = XEXP (orig_addr, 1);
39153 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39154 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39156 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
39157 UNSPEC_FUSION_P9);
39159 set = gen_rtx_SET (new_mem, new_src);
39160 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39161 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39162 emit_insn (insn);
39164 return;
39167 /* Return a string to fuse an addis instruction with a load using extended
39168 fusion. The address that is used is the logical address that was formed
39169 during peephole2: (lo_sum (high) (low-part))
39171 The code is complicated, so we call output_asm_insn directly, and just
39172 return "". */
39174 const char *
39175 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
39177 machine_mode mode = GET_MODE (reg);
39178 rtx hi;
39179 rtx lo;
39180 rtx addr;
39181 const char *load_string;
39182 int r;
39184 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
39186 mem = XEXP (mem, 0);
39187 mode = GET_MODE (mem);
39190 if (GET_CODE (reg) == SUBREG)
39192 gcc_assert (SUBREG_BYTE (reg) == 0);
39193 reg = SUBREG_REG (reg);
39196 if (!REG_P (reg))
39197 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
39199 r = REGNO (reg);
39200 if (FP_REGNO_P (r))
39202 if (mode == SFmode)
39203 load_string = "lfs";
39204 else if (mode == DFmode || mode == DImode)
39205 load_string = "lfd";
39206 else
39207 gcc_unreachable ();
39209 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39211 if (mode == SFmode)
39212 load_string = "lxssp";
39213 else if (mode == DFmode || mode == DImode)
39214 load_string = "lxsd";
39215 else
39216 gcc_unreachable ();
39218 else if (INT_REGNO_P (r))
39220 switch (mode)
39222 case E_QImode:
39223 load_string = "lbz";
39224 break;
39225 case E_HImode:
39226 load_string = "lhz";
39227 break;
39228 case E_SImode:
39229 case E_SFmode:
39230 load_string = "lwz";
39231 break;
39232 case E_DImode:
39233 case E_DFmode:
39234 if (!TARGET_POWERPC64)
39235 gcc_unreachable ();
39236 load_string = "ld";
39237 break;
39238 default:
39239 gcc_unreachable ();
39242 else
39243 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
39245 if (!MEM_P (mem))
39246 fatal_insn ("emit_fusion_p9_load not MEM", mem);
39248 addr = XEXP (mem, 0);
39249 fusion_split_address (addr, &hi, &lo);
39251 /* Emit the addis instruction. */
39252 emit_fusion_addis (tmp_reg, hi);
39254 /* Emit the D-form load instruction. */
39255 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
39257 return "";
39260 /* Return a string to fuse an addis instruction with a store using extended
39261 fusion. The address that is used is the logical address that was formed
39262 during peephole2: (lo_sum (high) (low-part))
39264 The code is complicated, so we call output_asm_insn directly, and just
39265 return "". */
39267 const char *
39268 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
39270 machine_mode mode = GET_MODE (reg);
39271 rtx hi;
39272 rtx lo;
39273 rtx addr;
39274 const char *store_string;
39275 int r;
39277 if (GET_CODE (reg) == SUBREG)
39279 gcc_assert (SUBREG_BYTE (reg) == 0);
39280 reg = SUBREG_REG (reg);
39283 if (!REG_P (reg))
39284 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
39286 r = REGNO (reg);
39287 if (FP_REGNO_P (r))
39289 if (mode == SFmode)
39290 store_string = "stfs";
39291 else if (mode == DFmode)
39292 store_string = "stfd";
39293 else
39294 gcc_unreachable ();
39296 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39298 if (mode == SFmode)
39299 store_string = "stxssp";
39300 else if (mode == DFmode || mode == DImode)
39301 store_string = "stxsd";
39302 else
39303 gcc_unreachable ();
39305 else if (INT_REGNO_P (r))
39307 switch (mode)
39309 case E_QImode:
39310 store_string = "stb";
39311 break;
39312 case E_HImode:
39313 store_string = "sth";
39314 break;
39315 case E_SImode:
39316 case E_SFmode:
39317 store_string = "stw";
39318 break;
39319 case E_DImode:
39320 case E_DFmode:
39321 if (!TARGET_POWERPC64)
39322 gcc_unreachable ();
39323 store_string = "std";
39324 break;
39325 default:
39326 gcc_unreachable ();
39329 else
39330 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
39332 if (!MEM_P (mem))
39333 fatal_insn ("emit_fusion_p9_store not MEM", mem);
39335 addr = XEXP (mem, 0);
39336 fusion_split_address (addr, &hi, &lo);
39338 /* Emit the addis instruction. */
39339 emit_fusion_addis (tmp_reg, hi);
39341 /* Emit the D-form store instruction. */
39342 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
39344 return "";
39347 #ifdef RS6000_GLIBC_ATOMIC_FENV
39348 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39349 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39350 #endif
39352 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39354 static void
39355 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39357 if (!TARGET_HARD_FLOAT)
39359 #ifdef RS6000_GLIBC_ATOMIC_FENV
39360 if (atomic_hold_decl == NULL_TREE)
39362 atomic_hold_decl
39363 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39364 get_identifier ("__atomic_feholdexcept"),
39365 build_function_type_list (void_type_node,
39366 double_ptr_type_node,
39367 NULL_TREE));
39368 TREE_PUBLIC (atomic_hold_decl) = 1;
39369 DECL_EXTERNAL (atomic_hold_decl) = 1;
39372 if (atomic_clear_decl == NULL_TREE)
39374 atomic_clear_decl
39375 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39376 get_identifier ("__atomic_feclearexcept"),
39377 build_function_type_list (void_type_node,
39378 NULL_TREE));
39379 TREE_PUBLIC (atomic_clear_decl) = 1;
39380 DECL_EXTERNAL (atomic_clear_decl) = 1;
39383 tree const_double = build_qualified_type (double_type_node,
39384 TYPE_QUAL_CONST);
39385 tree const_double_ptr = build_pointer_type (const_double);
39386 if (atomic_update_decl == NULL_TREE)
39388 atomic_update_decl
39389 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39390 get_identifier ("__atomic_feupdateenv"),
39391 build_function_type_list (void_type_node,
39392 const_double_ptr,
39393 NULL_TREE));
39394 TREE_PUBLIC (atomic_update_decl) = 1;
39395 DECL_EXTERNAL (atomic_update_decl) = 1;
39398 tree fenv_var = create_tmp_var_raw (double_type_node);
39399 TREE_ADDRESSABLE (fenv_var) = 1;
39400 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39402 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39403 *clear = build_call_expr (atomic_clear_decl, 0);
39404 *update = build_call_expr (atomic_update_decl, 1,
39405 fold_convert (const_double_ptr, fenv_addr));
39406 #endif
39407 return;
39410 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39411 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39412 tree call_mffs = build_call_expr (mffs, 0);
39414 /* Generates the equivalent of feholdexcept (&fenv_var)
39416 *fenv_var = __builtin_mffs ();
39417 double fenv_hold;
39418 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39419 __builtin_mtfsf (0xff, fenv_hold); */
39421 /* Mask to clear everything except for the rounding modes and non-IEEE
39422 arithmetic flag. */
39423 const unsigned HOST_WIDE_INT hold_exception_mask =
39424 HOST_WIDE_INT_C (0xffffffff00000007);
39426 tree fenv_var = create_tmp_var_raw (double_type_node);
39428 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39430 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39431 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39432 build_int_cst (uint64_type_node,
39433 hold_exception_mask));
39435 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39436 fenv_llu_and);
39438 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39439 build_int_cst (unsigned_type_node, 0xff),
39440 fenv_hold_mtfsf);
39442 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39444 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39446 double fenv_clear = __builtin_mffs ();
39447 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39448 __builtin_mtfsf (0xff, fenv_clear); */
39450 /* Mask that keeps only the upper 32 bits of the FPSCR image, clearing
39451 all of the exception, enable, and rounding mode bits. */
39452 const unsigned HOST_WIDE_INT clear_exception_mask =
39453 HOST_WIDE_INT_C (0xffffffff00000000);
39455 tree fenv_clear = create_tmp_var_raw (double_type_node);
39457 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39459 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39460 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39461 fenv_clean_llu,
39462 build_int_cst (uint64_type_node,
39463 clear_exception_mask));
39465 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39466 fenv_clear_llu_and);
39468 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39469 build_int_cst (unsigned_type_node, 0xff),
39470 fenv_clear_mtfsf);
39472 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39474 /* Generates the equivalent of feupdateenv (&fenv_var)
39476 double old_fenv = __builtin_mffs ();
39477 double fenv_update;
39478 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39479 (*(uint64_t*)fenv_var & 0x1ff80fff);
39480 __builtin_mtfsf (0xff, fenv_update); */
39482 const unsigned HOST_WIDE_INT update_exception_mask =
39483 HOST_WIDE_INT_C (0xffffffff1fffff00);
39484 const unsigned HOST_WIDE_INT new_exception_mask =
39485 HOST_WIDE_INT_C (0x1ff80fff);
39487 tree old_fenv = create_tmp_var_raw (double_type_node);
39488 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39490 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39491 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39492 build_int_cst (uint64_type_node,
39493 update_exception_mask));
39495 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39496 build_int_cst (uint64_type_node,
39497 new_exception_mask));
39499 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39500 old_llu_and, new_llu_and);
39502 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39503 new_llu_mask);
39505 tree update_mtfsf = build_call_expr (mtfsf, 2,
39506 build_int_cst (unsigned_type_node, 0xff),
39507 fenv_update_mtfsf);
39509 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
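/* Rough usage sketch (an editorial addition; the exact expansion is
   defined by the middle end, not here): for an atomic compound
   assignment on a floating-point type, HOLD is expanded before the
   compare-and-exchange loop to save the FPSCR and mask traps, CLEAR is
   expanded on the retry path to discard exceptions raised by a failed
   iteration, and UPDATE is expanded afterwards to restore the saved
   environment and re-raise the exceptions from the successful
   iteration.  */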
39512 void
39513 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39515 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39517 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39518 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39520 /* The destination layout of the vmrgew instruction is:
39521 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39522 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39523 vmrgew instruction will be correct. */
39524 if (VECTOR_ELT_ORDER_BIG)
39526 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39527 GEN_INT (0)));
39528 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39529 GEN_INT (3)));
39531 else
39533 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39534 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39537 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39538 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39540 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39541 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39543 if (VECTOR_ELT_ORDER_BIG)
39544 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39545 else
39546 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
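/* Worked element flow for the big-endian path (an editorial addition,
   not from the original source): with src1 = {a0, a1} and
   src2 = {b0, b1} in V2DFmode,
     rtx_tmp0 = {a0, b0} and rtx_tmp1 = {a1, b1} after the xxpermdi's,
   xvcvdpsp leaves the SFmode results in the even word slots of
   rtx_tmp2/rtx_tmp3, and vmrgew interleaves those even words so that
   dst = {a0, a1, b0, b1} as V4SF.  */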
39549 void
39550 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39552 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39554 rtx_tmp0 = gen_reg_rtx (V2DImode);
39555 rtx_tmp1 = gen_reg_rtx (V2DImode);
39557 /* The destination layout of the vmrgew instruction is:
39558 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39559 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39560 vmrgew instruction will be correct. */
39561 if (VECTOR_ELT_ORDER_BIG)
39563 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39564 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39566 else
39568 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39569 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39572 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39573 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39575 if (signed_convert)
39577 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39578 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39580 else
39582 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39583 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39586 if (VECTOR_ELT_ORDER_BIG)
39587 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39588 else
39589 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39592 void
39593 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39594 rtx src2)
39596 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39598 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39599 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39601 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39602 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39604 rtx_tmp2 = gen_reg_rtx (V4SImode);
39605 rtx_tmp3 = gen_reg_rtx (V4SImode);
39607 if (signed_convert)
39609 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39610 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39612 else
39614 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39615 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39618 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39621 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39623 static bool
39624 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39625 optimization_type opt_type)
39627 switch (op)
39629 case rsqrt_optab:
39630 return (opt_type == OPTIMIZE_FOR_SPEED
39631 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39633 default:
39634 return true;
39638 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39640 static HOST_WIDE_INT
39641 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39643 if (TREE_CODE (exp) == STRING_CST
39644 && (STRICT_ALIGNMENT || !optimize_size))
39645 return MAX (align, BITS_PER_WORD);
39646 return align;
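/* Example (an editorial addition, not from the original source): a
   STRING_CST such as "abcde" with byte alignment is bumped to
   BITS_PER_WORD (64 on powerpc64), presumably so string operations can
   work a word at a time; non-string constants keep their alignment.  */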
39649 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39651 static HOST_WIDE_INT
39652 rs6000_starting_frame_offset (void)
39654 if (FRAME_GROWS_DOWNWARD)
39655 return 0;
39656 return RS6000_STARTING_FRAME_OFFSET;
39659 struct gcc_target targetm = TARGET_INITIALIZER;
39661 #include "gt-rs6000.h"